| column | dtype | range / values |
| --- | --- | --- |
| repo_name | stringlengths | 5 - 114 |
| repo_url | stringlengths | 24 - 133 |
| snapshot_id | stringlengths | 40 - 40 |
| revision_id | stringlengths | 40 - 40 |
| directory_id | stringlengths | 40 - 40 |
| branch_name | stringclasses | 209 values |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 9.83k - 683M |
| star_events_count | int64 | 0 - 22.6k |
| fork_events_count | int64 | 0 - 4.15k |
| gha_license_id | stringclasses | 17 values |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_language | stringclasses | 115 values |
| files | listlengths | 1 - 13.2k |
| num_files | int64 | 1 - 13.2k |
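For orientation, the sketch below shows one way to stream and inspect records that follow this schema using the Hugging Face `datasets` library. It is a hedged example, not part of the dataset itself: the dataset identifier is a placeholder (the real name is not given in this excerpt), and the nested field names (`path`, `language`, `length_bytes`) are taken from the sample records shown below.

```python
# Minimal sketch, assuming the dump is published as a Hugging Face dataset.
# "your-org/source-code-dump" is a placeholder identifier, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("your-org/source-code-dump", split="train", streaming=True)

for i, record in enumerate(ds):
    # Top-level columns match the schema table above.
    print(record["repo_name"], record["branch_name"], "num_files:", record["num_files"])
    for f in record["files"]:
        # Each entry in "files" carries per-file metadata plus the raw text.
        print("   ", f["path"], f["language"], f["length_bytes"], "bytes")
    if i == 2:  # stop after a few records
        break
```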

repo_name: fuzzymango/postal
repo_url: https://github.com/fuzzymango/postal
snapshot_id: 619a6fb19d179d30cf580d1ef3f44803a3011070
revision_id: 5223127d291333c08f92288a9d97d0cc332c4d80
directory_id: a83f3f5e32bb54c3012b4f3661e776bab2a37af8
branch_name: refs/heads/main
visit_date: 2023-04-05T08:53:21.662167
revision_date: 2021-04-22T16:38:36
committer_date: 2021-04-22T16:38:36
github_id: 360,576,695
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.7755775451660156, "alphanum_fraction": 0.7772276997566223, "avg_line_length": 27.904762268066406, "blob_id": "bcfebdea804d43e5157d543950ca907253b32667", "content_id": "73c3700f9e202144a006fc153d168c81acfc4432", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 606, "license_type": "no_license", "max_line_length": 72, "num_lines": 21, "path": "/postal_1.0.0/postal_1.0.0/connect_to_parent.py", "repo_name": "fuzzymango/postal", "src_encoding": "UTF-8", "text": "import nuke\nimport nukescripts\nimport postal_main\n\ndef connect_to_parent(thisNode):\n\tprint 'connecting to parent!'\n\tparentName = thisNode['parentID'].value()\n\tparentNode = nuke.toNode(parentName)\n\tthisNode.setInput(0, parentNode)\n\tthisNode['hide_input'].setValue(parentNode['hideInputTracker'].value())\n\tthisNode['parentLabel'].setValue(parentNode['parentLabel'].value())\n\n\n\tlistOfChildren = postal_main.get_child_list(parentNode)\n\n\n\tchildName = thisNode['name'].value()\n\tif childName not in listOfChildren:\n\t\tlistOfChildren.append(childName)\n\n\tpostal_main.set_child_list(parentNode, listOfChildren, False)" } ]
num_files: 1

repo_name: bvn-architecture/vega
repo_url: https://github.com/bvn-architecture/vega
snapshot_id: 11e4fadd08acc5ef4a74a090c04e68dede6c23bc
revision_id: 7017b4a58577f79fa83897cdbe13a22e474cde59
directory_id: 15397b6a35a6762cc79a969a62ea461b41f31ef2
branch_name: refs/heads/master
visit_date: 2020-07-08T13:25:58.283273
revision_date: 2019-08-30T08:23:57
committer_date: 2019-08-30T08:23:57
github_id: 203,687,753
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: 2019-08-22T01:00:22
gha_updated_at: 2019-08-21T22:22:12
gha_pushed_at: 2019-08-21T16:10:41
gha_language: null
files:
[ { "alpha_fraction": 0.5874655842781067, "alphanum_fraction": 0.5929751992225647, "avg_line_length": 31.266666412353516, "blob_id": "0714d54a6af07aecfd1aaeb2fb0b46cf5a9fef30", "content_id": "81ed4d9a680001e8b2945d3fb14b3fe81e9228e7", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1452, "license_type": "permissive", "max_line_length": 94, "num_lines": 45, "path": "/docs/data/manipulate_json.py", "repo_name": "bvn-architecture/vega", "src_encoding": "UTF-8", "text": "import json\nimport random\n\nflare_type_empty_list = []\nwith open('C:/Users/mitch/Documents/GitHub/vega/docs/data/heirarchy_data.json') as f:\n root = json.load(f)\n flare_type_parent_child = []\n flare_type_dependencies = []\n id = 1\n flare_type_parent_child.append({\n \"id\": id,\n \"name\": root[\"name\"],\n \"parent\": \"\"\n })\n children = [(id, x) for x in root[\"children\"]]\n id += 1\n while children:\n parent_id, child = children.pop()\n flare_type_parent_child.append({\n \"id\": id,\n \"name\": child[\"name\"],\n \"parent\": parent_id\n })\n if child.get(\"children\"):\n children += [(id, x) for x in child[\"children\"]]\n id += 1\n\n# print(flare_type_parent_child)\nwith open(\"C:/Users/mitch/Documents/GitHub/vega/docs/data/heirarchy_data_mod.json\", \"w\") as f:\n json.dump(flare_type_parent_child, f)\n\nprint (flare_type_parent_child)\n\n# Create dependencies file\nids = [x['id'] for x in flare_type_parent_child]\ncount = 0\nwhile count < 300:\n a = random.choice(flare_type_parent_child)\n b = random.choice(flare_type_parent_child)\n if a[\"parent\"] != b[\"id\"] and a[\"id\"] != b[\"parent\"] and a[\"id\"] != b[\"id\"]:\n flare_type_dependencies.append({\"source\": a[\"id\"], \"target\": b[\"id\"]})\n count += 1\n\nwith open(\"C:/Users/mitch/Documents/GitHub/vega/docs/data/heirarchy_data_dep.json\", \"w\") as f:\n json.dump(flare_type_dependencies, f)\n" } ]
num_files: 1

repo_name: GreekTitan/HappyMonkAssignment
repo_url: https://github.com/GreekTitan/HappyMonkAssignment
snapshot_id: bb241c4da9e3b1079185921a4acd81125041acad
revision_id: 019abb285c8c5f319f34eab3e3e0a2d2e9523639
directory_id: 920d24db181e1715956e129b84dd4ec417763323
branch_name: refs/heads/master
visit_date: 2023-06-03T04:17:56.924120
revision_date: 2021-06-30T16:41:33
committer_date: 2021-06-30T16:41:33
github_id: 381,762,548
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.4820222556591034, "alphanum_fraction": 0.7782441973686218, "avg_line_length": 27.53645896911621, "blob_id": "25fa77417c73cbcf82a42d47ed143b9bd6820e40", "content_id": "a3990193e4cff3d676c9eec2a3ef3609d1d66258", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 5479, "license_type": "no_license", "max_line_length": 55, "num_lines": 192, "path": "/requirements.txt", "repo_name": "GreekTitan/HappyMonkAssignment", "src_encoding": "UTF-8", "text": "# This file may be used to create an environment using:\n# $ conda create --name <env> --file <this file>\n# platform: win-64\n_tflow_select=2.1.0=gpu\nabsl-py=0.13.0=py37haa95532_0\naiohttp=3.7.4=py37h2bbff1b_1\nanyio=3.2.1=py37h03978a9_0\nargon2-cffi=20.1.0=py37hcc03f2d_2\nastor=0.8.1=py37haa95532_0\nasync-timeout=3.0.1=py37haa95532_0\nasync_generator=1.10=py_0\nattrs=21.2.0=pyhd3eb1b0_0\nbabel=2.9.1=pyh44b312d_0\nbackcall=0.2.0=pyh9f0ad1d_0\nbackports=1.0=py_2\nbackports.functools_lru_cache=1.6.4=pyhd8ed1ab_0\nblas=1.0=mkl\nbleach=3.3.0=pyh44b312d_0\nblinker=1.4=py37haa95532_0\nbrotlipy=0.7.0=py37h2bbff1b_1003\nca-certificates=2021.5.25=haa95532_1\ncached-property=1.5.2=py_0\ncachetools=4.2.2=pyhd3eb1b0_0\ncertifi=2021.5.30=py37haa95532_0\ncffi=1.14.5=py37hcd4344a_0\nchardet=3.0.4=py37haa95532_1003\nclick=8.0.1=pyhd3eb1b0_0\ncolorama=0.4.4=pyh9f0ad1d_0\ncoverage=5.5=py37h2bbff1b_2\ncryptography=3.4.7=py37h71e12ea_0\ncudatoolkit=10.1.243=h74a9793_0\ncudnn=7.6.5=cuda10.1_0\ncycler=0.10.0=py_2\ncython=0.29.23=py37hd77b12b_0\ndecorator=5.0.9=pyhd8ed1ab_0\ndefusedxml=0.7.1=pyhd8ed1ab_0\nentrypoints=0.3=pyhd8ed1ab_1003\nfreetype=2.10.4=h546665d_1\ngast=0.2.2=py37_0\ngoogle-auth=1.32.0=pyhd3eb1b0_0\ngoogle-auth-oauthlib=0.4.4=pyhd3eb1b0_0\ngoogle-pasta=0.2.0=py_0\ngrpcio=1.36.1=py37hc60d5dd_1\nh5py=3.2.1=py37h3de5c98_0\nhdf5=1.10.6=h7ebc959_0\nicc_rt=2019.0.0=h0cc432a_1\nicu=68.1=h0e60522_0\nidna=2.10=pyhd3eb1b0_0\nimportlib-metadata=3.10.0=py37haa95532_0\nintel-openmp=2021.2.0=haa95532_616\nipykernel=5.5.5=py37h7813e69_0\nipython=7.25.0=py37h7813e69_0\nipython_genutils=0.2.0=py_1\njbig=2.1=h8d14728_2003\njedi=0.18.0=py37h03978a9_2\njinja2=3.0.1=pyhd8ed1ab_0\njoblib=1.0.1=pyhd3eb1b0_0\njpeg=9d=h8ffe710_0\njson5=0.9.5=pyh9f0ad1d_0\njsonschema=3.2.0=pyhd8ed1ab_3\njupyter_client=6.1.12=pyhd8ed1ab_0\njupyter_core=4.7.1=py37h03978a9_0\njupyter_server=1.9.0=pyhd8ed1ab_0\njupyterlab=3.0.16=pyhd8ed1ab_0\njupyterlab_pygments=0.1.2=pyh9f0ad1d_0\njupyterlab_server=2.6.0=pyhd8ed1ab_0\nkeras-applications=1.0.8=py_1\nkeras-base=2.3.1=py37_0\nkeras-gpu=2.3.1=0\nkeras-preprocessing=1.1.2=pyhd3eb1b0_0\nkiwisolver=1.3.1=py37h8c56517_1\nlcms2=2.12=h2a16943_0\nlerc=2.2.1=h0e60522_0\nlibclang=11.1.0=default_h5c34c98_1\nlibdeflate=1.7=h8ffe710_5\nlibpng=1.6.37=h1d00b33_2\nlibprotobuf=3.14.0=h23ce68f_0\nlibsodium=1.0.18=h8d14728_1\nlibtiff=4.3.0=h0c97f57_1\nlz4-c=1.9.3=h8ffe710_0\nm2w64-gcc-libgfortran=5.3.0=6\nm2w64-gcc-libs=5.3.0=7\nm2w64-gcc-libs-core=5.3.0=7\nm2w64-gmp=6.1.0=2\nm2w64-libwinpthread-git=5.0.0.4634.697f757=2\nmarkdown=3.3.4=py37haa95532_0\nmarkupsafe=2.0.1=py37hcc03f2d_0\nmatplotlib=3.4.2=py37h03978a9_0\nmatplotlib-base=3.4.2=py37h0d1fb12_0\nmatplotlib-inline=0.1.2=pyhd8ed1ab_2\nmistune=0.8.4=py37hcc03f2d_1003\nmkl=2021.2.0=haa95532_296\nmkl-service=2.3.0=py37h2bbff1b_1\nmkl_fft=1.3.0=py37h277e83a_2\nmkl_random=1.2.1=py37hf11a4ad_2\nmsys2-conda-epoch=20160418=1\nmultidict=5.1.0=py37h2bbff1b_2\nnbclassic=0.3.1=pyhd8ed1ab_1\nnbclient=0.5.3=pyhd8ed1ab_0\nnbconvert=6.1.0=py37h03978a9_0\nn
bformat=5.1.3=pyhd8ed1ab_0\nnest-asyncio=1.5.1=pyhd8ed1ab_0\nnotebook=6.4.0=pyha770c72_0\nnumpy=1.20.2=py37ha4e8547_0\nnumpy-base=1.20.2=py37hc2deb75_0\noauthlib=3.1.0=py_0\nolefile=0.46=pyh9f0ad1d_1\nonnx=1.7.0=py37_1\nopenjpeg=2.4.0=hb211442_1\nopenssl=1.1.1k=h2bbff1b_0\nopt_einsum=3.3.0=pyhd3eb1b0_1\npackaging=20.9=pyh44b312d_0\npandoc=2.14.0.3=h8ffe710_0\npandocfilters=1.4.2=py_1\nparso=0.8.2=pyhd8ed1ab_0\npickleshare=0.7.5=py_1003\npillow=8.2.0=py37h96663a1_1\npip=21.1.2=py37haa95532_0\nprometheus_client=0.11.0=pyhd8ed1ab_0\nprompt-toolkit=3.0.19=pyha770c72_0\nprotobuf=3.14.0=py37hd77b12b_1\npyasn1=0.4.8=py_0\npyasn1-modules=0.2.8=py_0\npycparser=2.20=py_2\npygments=2.9.0=pyhd8ed1ab_0\npyjwt=1.7.1=py37_0\npyopenssl=20.0.1=pyhd3eb1b0_1\npyparsing=2.4.7=pyh9f0ad1d_0\npyqt=5.12.3=py37h03978a9_7\npyqt-impl=5.12.3=py37hf2a7229_7\npyqt5-sip=4.19.18=py37hf2a7229_7\npyqtchart=5.12=py37hf2a7229_7\npyqtwebengine=5.12.1=py37hf2a7229_7\npyreadline=2.1=py37_1\npyrsistent=0.17.3=py37hcc03f2d_2\npysocks=1.7.1=py37_1\npython=3.7.10=h6244533_0\npython-dateutil=2.8.1=py_0\npython_abi=3.7=2_cp37m\npytz=2021.1=pyhd8ed1ab_0\npywin32=300=py37hcc03f2d_0\npywinpty=0.5.7=py37_0\npyyaml=5.4.1=py37h2bbff1b_1\npyzmq=22.1.0=py37hcce574b_0\nqt=5.12.9=h5909a2a_4\nrequests=2.25.1=pyhd3eb1b0_0\nrequests-oauthlib=1.3.0=py_0\nrequests-unixsocket=0.2.0=py_0\nrsa=4.7.2=pyhd3eb1b0_1\nscikit-learn=0.24.2=py37hf11a4ad_1\nscipy=1.6.2=py37h66253e8_1\nsend2trash=1.7.1=pyhd8ed1ab_0\nsetuptools=52.0.0=py37haa95532_0\nsix=1.16.0=pyhd3eb1b0_0\nsniffio=1.2.0=py37h03978a9_1\nsqlite=3.36.0=h2bbff1b_0\ntensorboard=2.4.0=pyhc547734_0\ntensorboard-plugin-wit=1.6.0=py_0\ntensorflow=2.1.0=gpu_py37h7db9008_0\ntensorflow-addons=0.9.1=py37_1\ntensorflow-base=2.1.0=gpu_py37h55f5790_0\ntensorflow-estimator=2.5.0=pyh7b7c402_0\ntensorflow-gpu=2.1.0=h0d30ee6_0\ntermcolor=1.1.0=py37haa95532_1\nterminado=0.9.4=py37haa95532_0\ntestpath=0.5.0=pyhd8ed1ab_0\nthreadpoolctl=2.1.0=pyh5ca1d4c_0\ntk=8.6.10=h8ffe710_1\ntornado=6.1=py37hcc03f2d_1\ntraitlets=5.0.5=py_0\ntypeguard=2.7.0=py37_0\ntyping-extensions=3.7.4.3=hd3eb1b0_0\ntyping_extensions=3.7.4.3=pyh06a4308_0\nurllib3=1.26.4=pyhd3eb1b0_0\nvc=14.2=h21ff451_1\nvs2015_runtime=14.27.29016=h5e58377_2\nwcwidth=0.2.5=pyh9f0ad1d_2\nwebencodings=0.5.1=py_1\nwebsocket-client=0.57.0=py37h03978a9_4\nwerkzeug=0.16.1=py_0\nwheel=0.36.2=pyhd3eb1b0_0\nwin_inet_pton=1.1.0=py37haa95532_0\nwincertstore=0.2=py37_0\nwinpty=0.4.3=4\nwrapt=1.12.1=py37he774522_1\nxz=5.2.5=h62dcd97_1\nyaml=0.2.5=he774522_0\nyarl=1.6.3=py37h2bbff1b_0\nzeromq=4.3.4=h0e60522_0\nzipp=3.4.1=pyhd3eb1b0_0\nzlib=1.2.11=h62dcd97_4\nzstd=1.5.0=h6255e5f_0\n" }, { "alpha_fraction": 0.6219218969345093, "alphanum_fraction": 0.6459459662437439, "avg_line_length": 21.78082275390625, "blob_id": "c1f5cba8883c1a3e34da2a706a1ee52f8c44d239", "content_id": "f10d50462ce58389b030c1cf0378d8768de9026a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3330, "license_type": "no_license", "max_line_length": 114, "num_lines": 146, "path": "/Assignment.py", "repo_name": "GreekTitan/HappyMonkAssignment", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation,Flatten\nfrom numpy import loadtxt\nimport numpy as np\nimport keras\nfrom sklearn.metrics import f1_score\n\n\n# In[2]:\n\n\nmnist = keras.datasets.mnist\n\n\n# In[3]:\n\n\n(x_train, 
y_train),(x_test, y_test) = mnist.load_data()\n\n\n# In[4]:\n\n\nplt.imshow(x_train[0],cmap=plt.cm.binary)\nplt.show()\n\n\n# In[5]:\n\n\n#normalizing data\nx_train = keras.utils.normalize(x_train, axis=1)\nx_test = keras.utils.normalize(x_test, axis=1)\nx_train.shape\n\n\n# In[6]:\n\n# normal callbacks\nclass CustomCallback(keras.callbacks.Callback):\n def on_train_begin(self,logs=None):\n print(\"Training started\")\n \n def on_train_end(self,logs=None):\n print(\"Training ended\")\n\n\n# In[7]:\n\n# Creates our custom activation layer\nclass SimpleDense(keras.layers.Layer):\n\n def __init__(self, units=32,**kwargs):\n super(SimpleDense, self).__init__(**kwargs)\n self.units = units\n\n def build(self, input_shape):\n global layer\n layer = layer+1\n self.w = self.add_weight(shape=(input_shape[-1], self.units),\n initializer='random_uniform',\n trainable=True)\n self.b = self.add_weight(shape=(self.units,),\n initializer='random_normal',\n trainable=True)\n self.add_metric(self.w,name='k1_'+str(layer))\n self.add_metric(self.b,name='k0_'+str(layer)) \n super(SimpleDense,self).build(input_shape=(input_shape[-1],self.units))\n\n def call(self, inputs):\n return tf.matmul(inputs, self.w) + self.b\n\n\n# In[9]:\n\n\nlayer = 0\nmodel = Sequential()\nmodel.add(Flatten(input_shape=(28,28)))\nmodel.add(Dense(30))\nmodel.add(SimpleDense(30))\nmodel.add(Dense(10, activation=tf.nn.softmax))\nmodel.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\nmodel.summary()\n\n\n# In[10]:\n\n\nhistory = model.fit(x_train, y_train, epochs=25, validation_data = (x_test,y_test),callbacks = [CustomCallback()])\n\n\n# In[11]:\n\n\ny_pred1 = model.predict(x_test)\ny_preds = np.array([])\nfor i in range(len(y_test)):\n y_preds = np.append(y_preds,np.argmax(y_pred1[i]))\nprint(\"The F1 Score for the current model is = \",f1_score(y_test,y_preds,average=\"macro\"))\n\n\n# In[12]:\n\n\nfig,axis = plt.subplots(1,3,figsize=(20,5))#,sharex=True, sharey=True)\nfig.set_tight_layout({'pad':2})\n#accuracy\naxis[0].plot(history.history['accuracy'])\naxis[0].plot(history.history['val_accuracy'])\naxis[0].set_title('Train vs Test accuracy')\naxis[0].set_ylabel('accuracy')\naxis[0].set_xlabel('epoch')\naxis[0].legend(['train','test'], loc='upper left')\n\n#loss\naxis[1].plot(history.history['loss'])\naxis[1].plot(history.history['val_loss'])\naxis[1].set_title('Train vs Test loss')\naxis[1].set_ylabel('loss')\naxis[1].set_xlabel('epoch')\naxis[1].legend(['train','test'], loc='upper right')\n\n#k0 and k1 updates first layer\naxis[2].plot(history.history['k0_1'])\naxis[2].plot(history.history['k1_1'])\naxis[2].set_title('k0 vs k1 - Layer 1')\naxis[2].set_ylabel('parmaeter')\naxis[2].set_xlabel('epoch')\naxis[2].legend(['k0 Layer 1','k1 Layer 1'], loc='upper left')\n\nplt.show()\nfig.savefig('Metrics.png')\n\n\n# In[ ]:\n\n\n\n\n" } ]
num_files: 2

repo_name: Shane-Hsieh/Twitter-Bot
repo_url: https://github.com/Shane-Hsieh/Twitter-Bot
snapshot_id: 2a87611adba1bed0b2dd1564adc49afc7d53d825
revision_id: b71337f7dbf877c31ce153c25fb3d62416480b47
directory_id: b9d48d4e4ceeff56fa127db71d91ef7077f35c7a
branch_name: refs/heads/main
visit_date: 2023-02-09T18:37:52.493239
revision_date: 2021-01-05T05:03:26
committer_date: 2021-01-05T05:03:26
github_id: 326,891,382
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6918604373931885, "alphanum_fraction": 0.6965116262435913, "avg_line_length": 25.875, "blob_id": "0fffc1565df6d9e0a5122f380962b1188cee8ec5", "content_id": "dd0fb439f43da3c4b7d62e3a80415983ef834769", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 860, "license_type": "permissive", "max_line_length": 84, "num_lines": 32, "path": "/Twitter-Bot.py", "repo_name": "Shane-Hsieh/Twitter-Bot", "src_encoding": "UTF-8", "text": "import tweepy\nimport time\n\n// You just enter your tokens from the API here, blank because I'm not sharing mine.\nconsumer_key = ''\nconsumer_secret = ''\naccess_token = ''\naccess_token_secret = ''\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth)\n\ndef limit_handle(cursor):\n while True:\n try:\n yield cursor.next()\n except tweepy.RateLimitError:\n time.sleep(1000)\n\nfor follower in limit_handle(tweepy.Cursor(api.followers).items()):\n if follower.name == 'Usernamehere':\n print(follower.name)\n follower.follow()\n\nfor tweet in tweepy.Cursor(api.search, search).items(numberOfTweets):\n try:\n tweet.favorite()\n print('Retweeted the tweet')\n except tweepy.TweepError as e:\n print(e.reason)\n except StopIteration:\n break\n" } ]
num_files: 1

repo_name: easemob/im_flutter_sdk
repo_url: https://github.com/easemob/im_flutter_sdk
snapshot_id: 480f84f0cc762987689c879be426b06ac7ac4506
revision_id: 909a18efd3f229efa1f72b228f0f07d2d359c517
directory_id: e5ab5de40fd122f968bb05eff743656348147d05
branch_name: refs/heads/flutter2_stable
visit_date: 2023-08-14T19:19:28.424245
revision_date: 2023-08-10T08:12:53
committer_date: 2023-08-10T08:12:53
github_id: 206,222,875
star_events_count: 249
fork_events_count: 121
gha_license_id: MIT
gha_created_at: 2019-09-04T03:26:41
gha_updated_at: 2023-07-12T09:57:43
gha_pushed_at: 2023-08-10T08:12:53
gha_language: Dart
files:
[ { "alpha_fraction": 0.634562075138092, "alphanum_fraction": 0.6367793679237366, "avg_line_length": 32.33025360107422, "blob_id": "556811e0f13d571cf6ebb964c3bfcd8d37261d6e", "content_id": "79ec5b19cb68c95413516c4c5ba2e4e5a96a9015", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14998, "license_type": "permissive", "max_line_length": 140, "num_lines": 433, "path": "/update_to_agora.py", "repo_name": "easemob/im_flutter_sdk", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\nimport os\nimport shutil\nimport re\nimport sys\nimport getopt\n\n# 使用方法\n# python update_to_agora.py -s testApplication -t agora-brand\n# 执行上述命令,指定要复制的文件夹和目标文件夹的名字\n#\n# 修改 FOLDER_dict中的字符串用作文件路径的修改\n# 修改 UPDATE_dict中的字符串用作文件中内容的修改\n# walkFile 中判断文件后缀名,按需要修改\n#\n\n\ndef main(argv):\n sourcePath = ''\n targetPath = ''\n\n try:\n opts, args = getopt.getopt(\n argv[1:], \"-h-s:-t:\", [\"source=\", \"target=\"])\n\n except getopt.GetoptError:\n print 'update_to_agora.py -s <source> -t <target>'\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print 'update_to_agora.py -s <source> -t <target>'\n sys.exit()\n if opt in (\"-s\", \"--source\"):\n sourcePath = arg\n print 'source folder: ', sourcePath\n if opt in (\"-t\", \"--target\"):\n targetPath = arg\n print 'target folder: ', targetPath\n\n if(sourcePath == '' or targetPath == ''):\n print 'need source and target path.'\n return\n\n # 当前文件路径\n # print(os.path.realpath(__file__))\n # 当前文件所在的目录,即父路径\n currentFolder = os.path.split(os.path.realpath(__file__))[0]\n # print(os.path.split(os.path.realpath(__file__))[0])\n sourcePath = currentFolder + \"/\" + sourcePath\n targetPath = currentFolder + \"/\" + targetPath\n\n print 'target folder: ', targetPath\n\n if sourcePath == '' or targetPath == '':\n print \"missing source or target\"\n sys.exit(2)\n\n copyFolder(sourcePath, targetPath)\n\n #oldName = outputfile + \"/app/src\"\n renameFolder(targetPath)\n walkFile(targetPath)\n\n\n# 需要修改的文件夹名\nFOLDER_dict = {\n \n}\n\n# 修改文件夹名称\n\n\ndef renameFolder(folderName):\n print 'renameFolder: ' + folderName\n # 修改文件夹名\n for parent, dirnames, filenames in os.walk(folderName, topdown=False):\n\n for filename in filenames:\n file_ext = filename.rsplit('.', 1)\n if len(file_ext) != 2:\n continue\n changeFileName(parent, filename)\n\n for dirname in dirnames:\n pathdir = os.path.join(parent, dirname)\n for k in FOLDER_dict.keys():\n if dirname == k:\n print 'rename: ' + pathdir\n os.renames(pathdir, pathdir.replace(k, FOLDER_dict.get(k)))\n\n\n# 需要修改的文件名\nFILE_dict = {\n \"im_flutter_sdk.iml\":\"agora_chat_sdk.iml\",\n\n # /lib/\n \"im_flutter_sdk.dart\":\"agora_chat_sdk.dart\",\n\n # /lib/src/\n \"em_client.dart\":\"chat_client.dart\",\n \"em_contact_manager.dart\":\"chat_contact_manager.dart\",\n \"em_group_manager.dart\":\"chat_group_manager.dart\",\n \"em_listeners.dart\":\"chat_listeners.dart\",\n \"em_chat_manager.dart\":\"chat_manager.dart\",\n \"em_push_manager.dart\":\"chat_push_manager.dart\",\n \"em_chat_room_manager.dart\":\"chat_room_manager.dart\",\n \"em_userInfo_manager.dart\":\"chat_userInfo_manager.dart\",\n \"em_chat_manager_moderation_plugin.dart\":\"chat_manager_moderation_plugin.dart\",\n \"em_chat_manager_reaction_plugin.dart\":\"chat_manager_reaction_plugin.dart\",\n \"em_chat_manager_transform_plugin.dart\":\"chat_manager_transform_plugin.dart\",\n \"em_chat_thread_manager.dart\":\"chat_thread_manager.dart\",\n 
\"em_message_status_callback.dart\":\"chat_message_status_callback.dart\",\n \"em_presence_manager.dart\":\"chat_presence_manager.dart\",\n \"em_status_listener.dart\":\"chat_status_listener.dart\",\n\n # /lib/src/internal\n \"em_sdk_method.dart\":\"chat_method_keys.dart\",\n \"em_push_config.dart\":\"chat_push_config.dart\",\n \"em_channel_manager.dart\":\"chat_channel_manager.dart\",\n \"em_event_keys.dart\":\"chat_event_keys.dart\",\n \"em_transform_tools.dart\":\"chat_transform_tools.dart\",\n \n \n # /lib/src/models/\n \"em_conversation.dart\":\"chat_conversation.dart\",\n \"em_cursor_result.dart\":\"chat_cursor_result.dart\",\n \"em_deviceInfo.dart\":\"chat_deviceInfo.dart\",\n \"em_chat_enums.dart\":\"chat_enums.dart\",\n \"em_error.dart\":\"chat_error.dart\",\n \"em_group_message_ack.dart\":\"chat_group_message_ack.dart\",\n \"em_group_options.dart\":\"chat_group_options.dart\",\n \"em_group.dart\":\"chat_group.dart\",\n \"em_message.dart\":\"chat_message.dart\",\n \"em_options.dart\":\"chat_options.dart\",\n \"em_page_result.dart\":\"chat_page_result.dart\",\n \"em_push_configs.dart\":\"chat_push_configs.dart\",\n \"em_chat_room.dart\":\"chat_room.dart\",\n \"em_userInfo.dart\":\"chat_userInfo.dart\",\n \"em_group_shared_file.dart\":\"chat_group_shared_file.dart\",\n \"em_chat_thread_event.dart\":\"chat_thread_event.dart\",\n \"em_chat_thread.dart\":\"chat_thread.dart\",\n \"em_cmd_message_body.dart\":\"chat_cmd_message_body.dart\",\n \"em_custom_message_body.dart\":\"chat_custom_message_body.dart\",\n \"em_download_callback.dart\":\"chat_download_callback.dart\",\n \"em_file_message_body.dart\":\"chat_file_message_body.dart\",\n \"em_group_info.dart\":\"chat_group_info.dart\",\n \"em_image_message_body.dart\":\"chat_image_message_body.dart\",\n \"em_location_message_body.dart\":\"chat_location_message_body.dart\",\n \"em_message_body.dart\":\"chat_message_body.dart\",\n \"em_message_reaction_change.dart\":\"chat_message_reaction_change.dart\",\n \"em_message_reaction.dart\":\"chat_message_reaction.dart\",\n \"em_presence.dart\":\"chat_presence.dart\",\n \"em_text_message_body.dart\":\"chat_text_message_body.dart\",\n \"em_translate_language.dart\":\"chat_translate_language.dart\",\n \"em_video_message_body.dart\":\"chat_video_message_body.dart\",\n \"em_voice_message_body.dart\":\"chat_voice_message_body.dart\",\n \"em_domain_terms.dart\":\"chat_domain_terms.dart\",\n \n \n # /lib/src/tools/\n \"em_extension.dart\":\"chat_extension.dart\",\n \"em_log.dart\":\"chat_log.dart\",\n \"em_progress_manager.dart\":\"chat_progress_manager.dart\",\n\n # /ios/\n \"im_flutter_sdk.podspec\":\"agora_chat_sdk.podspec\"\n}\n\n\ndef changeFileName(parent, filename):\n pathdir = os.path.join(parent, filename)\n for k in FILE_dict.keys():\n if filename == k:\n value = FILE_dict.get(k)\n print '%-40s' % filename + value\n os.renames(pathdir, pathdir.replace(k, value))\n\n\n# 遍历文件夹\ndef walkFile(file):\n print 'begin walkFile'\n total_yaml_num = 0\n total_gradle_num = 0\n total_dart_num = 0\n total_podspec_num = 0\n total_md_num = 0\n\n for root, dirs, files in os.walk(file):\n\n # root 表示当前正在访问的文件夹路径\n # dirs 表示该文件夹下的子目录名list\n # files 表示该文件夹下的文件list\n\n # 遍历文件\n for f in files:\n #print(os.path.join(root, f))\n file_path = os.path.join(root, f)\n file_ext = file_path.rsplit('.', 1)\n if len(file_ext) != 2:\n # 没有后缀名\n continue\n if file_ext[1] == 'yaml' or file_ext[1] == 'gradle' or file_ext[1] == 'dart' or file_ext[1] == 'podspec' or file_ext[1] == 'md':\n if file_ext[1] == 'yaml':\n 
total_yaml_num += 1\n if file_ext[1] == 'gradle':\n total_gradle_num += 1\n if file_ext[1] == 'dart':\n total_dart_num += 1\n if file_ext[1] == 'podspec':\n total_podspec_num += 1\n if file_ext[1] == 'md':\n total_md_num += 1\n\n fullname = os.path.join(root, f)\n updateFile(fullname)\n reBackFile(fullname)\n\n\n # 遍历所有的文件夹\n # for d in dirs:\n # print(os.path.join(root, d))\n\n print 'total .yaml files: ' + str(total_yaml_num)\n print 'total .gradle files: ' + str(total_gradle_num)\n print 'total .dart files: ' + str(total_dart_num)\n print 'total .podspec files: ' + str(total_podspec_num)\n print 'total .md files: ' + str(total_md_num)\n\n\n# 需要替换的字符串map\nUPDATE_dict = {\n # pubspec.yaml\n \"name: im_flutter_sdk\": \"name: agora_chat_sdk\",\n \n # lib/src/agora_chat_sdk.dart\n \"library im_flutter_sdk\":\"library agora_chat_sdk\",\n \"Easemob IM flutter SDK.\":\"agora chat flutter SDK.\",\n \"http://www.easemob.com/product/im\":\"https://www.agora.io\",\n\n # agora_chat_sdk.dart\n \"em_client\":\"chat_client\",\n \"em_chat_manager\":\"chat_manager\",\n \"em_contact_manager\":\"chat_contact_manager\",\n \"em_group_manager\":\"chat_group_manager\",\n \"em_push_manager\":\"chat_push_manager\",\n \"em_userInfo_manager\":\"chat_userInfo_manager\",\n \"em_domain_terms\":\"chat_domain_terms\",\n \"em_message\":\"chat_message\",\n \"em_group_message_ack\":\"chat_group_message_ack\",\n \"em_log\":\"chat_log\",\n \"em_listeners\":\"chat_listeners\",\n \"em_chat_room\":\"chat_room\",\n \"em_conversation\":\"chat_conversation\",\n \"em_cursor_result\":\"chat_cursor_result\",\n \"em_deviceInfo\":\"chat_deviceInfo\",\n \"em_error\":\"chat_error\",\n \"em_group\":\"chat_group\",\n \"em_options\":\"chat_options\",\n \"em_push_config\":\"chat_push_config\",\n \"em_page_result\":\"chat_page_result\",\n \"em_userInfo\":\"chat_userInfo\",\n \"em_chat_enums\":\"chat_enums\",\n \"em_transform_tools\":\"chat_transform_tools\",\n \"em_presence\":\"chat_presence\",\n \"em_file_message_body\":\"chat_file_message_body\",\n \"em_chat_thread\":\"chat_thread\",\n \"em_text_message_body\":\"chat_text_message_body\",\n \"em_image_message_body\":\"chat_image_message_body\",\n \"em_location_message_body\":\"chat_location_message_body\",\n \"em_voice_message_body\":\"chat_voice_message_body\",\n \"em_video_message_body\":\"chat_video_message_body\",\n \"em_custom_message_body\":\"chat_custom_message_body\",\n \"em_cmd_message_body\":\"chat_cmd_message_body\",\n \"em_status_listener\":\"chat_status_listener\",\n \"em_progress_manager\":\"chat_progress_manager\",\n \"em_event_keys\":\"chat_event_keys\",\n \"em_download_callback\":\"chat_download_callback\",\n \"em_chat_thread_event\":\"chat_thread_event\",\n \"em_channel_manager\":\"chat_channel_manager\",\n \"em_translate_language\":\"chat_translate_language\",\n \n \"em_extension\":\"chat_extension\",\n \"/im_flutter_sdk.dart\":\"/agora_chat_sdk.dart\",\n \"package:im_flutter_sdk\":\"package:agora_chat_sdk\",\n \"im_flutter_sdk:\":\"agora_chat_sdk:\",\n\n #class name.\n \"EMClient\":\"ChatClient\",\n \"EMMessageChatType\":\"ChatMessageChatType\",\n \"EMMessageDirection\":\"ChatMessageDirection\",\n \"EMMessageStatus\":\"ChatMessageStatus\",\n \"EMDownloadStatus\":\"ChatDownloadStatus\",\n \"EMMessageBodyType\":\"ChatMessageBodyType\",\n \"EMMessageStatusListener\":\"ChatMessageStatusListener\",\n \"EMMessage\":\"ChatMessage\",\n \"EMMessageBody\":\"ChatMessageBody\",\n \"EMTextMessageBody\":\"ChatTextMessageBody\",\n 
\"EMLocationMessageBody\":\"ChatLocationMessageBody\",\n \"EMFileMessageBody\":\"ChatFileMessageBody\",\n \"EMImageMessageBody\":\"ChatImageMessageBody\",\n \"EMVideoMessageBody\":\"ChatVideoMessageBody\",\n \"EMVoiceMessageBody\":\"ChatVoiceMessageBody\",\n \"EMCmdMessageBody\":\"ChatCmdMessageBody\",\n \"EMCustomMessageBody\":\"ChatCustomMessageBody\",\n \"EMOptions\":\"ChatOptions\",\n \"EMPushConfig\":\"ChatPushConfig\",\n \"EMPageResult\":\"ChatPageResult\",\n \"EMImPushStyle\":\"ChatPushStyle\",\n \"EMPushConfigExtension\":\"ChatPushConfigExtension\",\n \"EMChatRoom\":\"ChatRoom\",\n \"EMUserInfo\":\"ChatUserInfo\",\n \"EMImPushStyle\":\"ChatPushStyle\",\n \"EMConversationType\":\"ChatConversationType\",\n \"EMConversation\":\"ChatConversation\",\n \"EMConversationExtension\":\"ChatConversationExtension\",\n \"EMError\":\"ChatError\",\n \"EMMessageSearchDirection\":\"ChatMessageSearchDirection\",\n \"EMCursorResult\":\"ChatCursorResult\",\n \"EMDeviceInfo\":\"ChatDeviceInfo\",\n \"EMChatManagerListener\":\"ChatManagerListener\",\n \"EMGroupMessageAck\":\"ChatGroupMessageAck\",\n \"EMGroupStyle\":\"ChatGroupStyle\",\n \"EMGroup\":\"ChatGroup\",\n \"EMGroupPermissionType\":\"ChatGroupPermissionType\",\n \"EMGroupOptions\":\"ChatGroupOptions\",\n \"EMGroupSharedFile\":\"ChatGroupSharedFile\",\n \"EMContactGroupEvent\":\"ChatContactGroupEvent\",\n \"EMContactManager\":\"ChatContactManager\",\n \"EMChatManager\":\"ChatManager\",\n \"EMGroupManager\":\"ChatGroupManager\",\n \"EMPushManager\":\"ChatPushManager\",\n \"EMContactEventListener\":\"ChatContactEventListener\",\n \"EMContactChangeEvent\":\"ChatContactChangeEvent\",\n \"EMImPushConfig\":\"ChatPushConfig\",\n \"EMMethodChannel\":\"ChatMethodChannel\",\n \"EMDownloadCallback\":\"ChatDownloadCallback\",\n \"EMChatThread\":\"ChatThread\",\n \"EMCustomListener\":\"ChatCustomListener\",\n \"EMMultiDeviceListener\":\"ChatMultiDeviceListener\",\n \"EMMultiDevicesEvent\":\"ChatMultiDevicesEvent\",\n \"EMPresenceManagerListener\":\"ChatPresenceManagerListener\",\n \"EMTransformPlugin\":\"ChatTransformPlugin\",\n \"EMReactionPlugin\":\"ChatReactionPlugin\",\n \"EMModerationPlugin\":\"ChatModerationPlugin\",\n \"EMConnectionListener\":\"ChatConnectionListener\",\n \"EMSearchDirection\":\"ChatSearchDirection\",\n \"EMProgressManager\":\"ChatProgressManager\",\n \"EMChatRoom\":\"ChatRoom\",\n \"EMRoomManager\":\"ChatRoomManager\",\n \"EMPresence\":\"ChatPresence\",\n \"EMTranslateLanguage\":\"ChatTranslateLanguage\",\n \"EMContact\":\"Contact\",\n \"EMConnection\":\"Connection\",\n \"EMMultiDevice\":\"ChatMultiDevice\",\n \"EMChatEventHandler\":\"ChatEventHandler\",\n \n\n #podspec\n \"= 'im_flutter_sdk'\":\"= 'agora_chat_sdk'\",\n\n #doc\n #\"im_flutter_sdk\":\"agora_chat_sdk\",\n \n \"环信即时通讯\":\" Agora \",\n}\n\n# 将修改错误的再改回来\nRE_BACK_dict = {\n \"rootProject.name = 'agora_chat_sdk'\":\"rootProject.name = 'im_flutter_sdk'\"\n}\n\n\n# 按照UPDATE_dict中的内容查找替换文件内容\n\n\ndef updateFile(file):\n \"\"\"\n 替换文件中的字符串\n :param file:文件名\n :param old_str:旧字符串\n :param new_str:新字符串\n :return:\n \"\"\"\n file_data = \"\"\n with open(file, \"r\") as f:\n for line in f:\n for k in UPDATE_dict.keys():\n line = line.replace(k, UPDATE_dict.get(k))\n\n file_data += line\n\n with open(file, \"w\") as f:\n f.write(file_data)\n\n\ndef reBackFile(file):\n file_data = \"\"\n with open(file, \"r\") as f:\n for line in f:\n for k in RE_BACK_dict.keys():\n line = line.replace(k, RE_BACK_dict.get(k))\n\n file_data += line\n\n with open(file, \"w\") as f:\n 
f.write(file_data)\n\n\n\n# 复制文件夹\n\n\ndef copyFolder(source, target):\n source_path = source\n target_path = target\n print 'begin copy ' + source\n print 'to ' + target\n\n if not os.path.exists(target_path):\n # 如果目标路径不存在原文件夹的话就创建\n os.makedirs(target_path)\n\n if os.path.exists(source_path):\n # 如果目标路径存在原文件夹的话就先删除\n shutil.rmtree(target_path)\n\n shutil.copytree(source_path, target_path)\n print('copy dir finished!')\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n" }, { "alpha_fraction": 0.6594464778900146, "alphanum_fraction": 0.660649836063385, "avg_line_length": 33.625, "blob_id": "b9e683ebed8f2173f0d5592195de4969015bd031", "content_id": "473f1299b40e0598050fc4a22ee02e053f48a966", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 831, "license_type": "permissive", "max_line_length": 101, "num_lines": 24, "path": "/example/ios/Runner/AppDelegate.swift", "repo_name": "easemob/im_flutter_sdk", "src_encoding": "UTF-8", "text": "import UIKit\nimport Flutter\nimport im_flutter_sdk\n\n@UIApplicationMain\n@objc class AppDelegate: FlutterAppDelegate {\n override func application(\n _ application: UIApplication,\n didFinishLaunchingWithOptions launchOptions: [UIApplication.LaunchOptionsKey: Any]?\n ) -> Bool {\n GeneratedPluginRegistrant.register(with: self)\n \n// Timer.scheduledTimer(withTimeInterval: 3, repeats: false) { t in\n// if(self.hasPlugin(\"ImFlutterSdkPlugin\")) {\n// print(\"find\")\n// let wrapper = self.valuePublished(byPlugin: \"ImFlutterSdkPlugin\") as! EMClientWrapper\n// wrapper.sendData(toFlutter: [\"key\":\"value\"])\n// }else {\n// print(\"no find\")\n// }\n// }\n return super.application(application, didFinishLaunchingWithOptions: launchOptions)\n }\n}\n" }, { "alpha_fraction": 0.6840606927871704, "alphanum_fraction": 0.6878557801246643, "avg_line_length": 28.27777862548828, "blob_id": "2172832195cd85dcce46b325e318a8c0e3cdce56", "content_id": "b1d3d6f1213ded1b634b69c18f2562470b8b85eb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Kotlin", "length_bytes": 1054, "license_type": "permissive", "max_line_length": 103, "num_lines": 36, "path": "/example/android/app/src/main/kotlin/com/example/quick_start/MainActivity.kt", "repo_name": "easemob/im_flutter_sdk", "src_encoding": "UTF-8", "text": "package com.example.quick_start\n\n\nimport android.content.Context\nimport android.os.Bundle\nimport android.os.PersistableBundle\nimport android.util.AttributeSet\nimport android.view.ActionMode\nimport android.view.View\nimport com.easemob.im_flutter_sdk.ImFlutterSdkPlugin\nimport io.flutter.embedding.android.FlutterActivity\nimport io.flutter.embedding.engine.FlutterEngine\nimport java.util.*\n\n\nclass MainActivity: FlutterActivity() {\n\n override fun configureFlutterEngine(flutterEngine: FlutterEngine) {\n super.configureFlutterEngine(flutterEngine)\n\n val task: TimerTask = object : TimerTask() {\n override fun run() {\n testCode();\n }\n }\n val timer = Timer()\n timer.schedule(task, 3000)\n }\n\n fun testCode() {\n// if (flutterEngine?.plugins?.has(ImFlutterSdkPlugin::class.java) == true){\n// var p = flutterEngine?.plugins?.get(ImFlutterSdkPlugin::class.java) as ImFlutterSdkPlugin\n// p.sendDataToFlutter(mapOf(\"key\" to \"Value\"));\n// }\n }\n }\n" }, { "alpha_fraction": 0.6183369159698486, "alphanum_fraction": 0.6481876373291016, "avg_line_length": 17.038461685180664, "blob_id": "99087480e3d55a8d34beda8365ce4f6c0133fbb7", "content_id": 
"213dea090d028612ee3583d19f66e69c8cc3e911", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Gradle", "length_bytes": 954, "license_type": "permissive", "max_line_length": 75, "num_lines": 52, "path": "/android/build.gradle", "repo_name": "easemob/im_flutter_sdk", "src_encoding": "UTF-8", "text": "group 'com.easemob.im_flutter_sdk'\nversion '1.0-SNAPSHOT'\n\nbuildscript {\n repositories {\n google()\n mavenCentral()\n }\n\n dependencies {\n classpath 'com.android.tools.build:gradle:4.2.0'\n }\n}\n\nallprojects {\n repositories {\n google()\n mavenCentral()\n }\n}\n\napply plugin: 'com.android.library'\n\nandroid {\n compileSdkVersion 28\n buildToolsVersion = '28.0.3'\n\n defaultConfig {\n minSdkVersion 21\n testInstrumentationRunner \"androidx.test.runner.AndroidJUnitRunner\"\n }\n\n //自3.6.0开始需要java8的支持\n compileOptions {\n sourceCompatibility JavaVersion.VERSION_1_8\n targetCompatibility JavaVersion.VERSION_1_8\n }\n\n lintOptions {\n disable 'InvalidPackage'\n }\n\n}\n\ntasks.withType(JavaCompile){\n options.encoding = \"UTF-8\"\n}\n\ndependencies {\n api 'androidx.appcompat:appcompat:1.1.0'\n implementation 'io.hyphenate:hyphenate-chat:4.1.1'\n}\n" }, { "alpha_fraction": 0.5372477173805237, "alphanum_fraction": 0.5414679050445557, "avg_line_length": 23.549549102783203, "blob_id": "c8bc1489a3a0a2c79e8066ef055f2cad75191517", "content_id": "609274357eab81a84d4e68aef848fa589bda5d9b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 12214, "license_type": "permissive", "max_line_length": 104, "num_lines": 444, "path": "/README.md", "repo_name": "easemob/im_flutter_sdk", "src_encoding": "UTF-8", "text": "# 环信即时通讯 im_flutter_sdk 快速入门\n\n通过本文可以实现一个集成聊天 SDK 的简单 app。\n\n## 实现原理\n\n下图展示在客户端发送和接收一对一文本消息的工作流程。\n\n<img src=https://docs-im.easemob.com/_media/ccim/web/sendandreceivemsg.png width=\"80%\">\n\n如上图所示,发送和接收单聊消息的步骤如下:\n\n1. 客户端向你的应用服务器请求 Token,你的应用服务器返回 Token。\n2. 客户端 A 和客户端 B 使用获得的 Token 登录环信即时通讯系统。\n3. 客户端 A 发送消息到环信即时通讯服务器。\n4. 环信即时通讯服务器将消息发送到客户端 B,客户端 B 接收消息。\n\n## 前提条件\n\n如果你的目标平台是iOS,你需要满足一下要求:\n- Xcode 12.4 或以上版本,包括命令行工具;\n- iOS 10 或以上版本;\n- Android SDK API 等级 21 或以上版本;\n- Android Studio 4.0 或以上版本,包括 JDK 1.8 或以上版本;\n- CocoaPods 包管理工具;\n- Flutter 2.10 或以上版本;\n- Dart 2.16 或以上版本;\n\n[配置开发或者运行环境如果遇到问题,请参考这里](https://docs.flutter.dev/get-started/install)\n- 有效的环信即时通讯 IM 开发者账号和 App Key,详见 [环信即时通讯云控制台](https://console.easemob.com/user/login)。\n\n## 项目设置\n\n### 使用命令创建项目\n\n打开终端,进入需要创建项目的目录,输入命令进行 `flutter create` 项目创建:\n\n```bash\nflutter create quick_start\n```\n\n### 设置 Android\n\n1. 打开文件 `quick_start/android/app/build.gradle` 在文件最后添加:\n\n```gradle\nandroid {\n defaultConfig {\n minSdkVersion 21\n }\n}\n```\n\n1. 在 `quick_start/android/app/proguard-rules.pro` 中设置免混淆规则:\n\n```java\n-keep class com.hyphenate.** {*;}\n-dontwarn com.hyphenate.**\n```\n\n### 设置 iOS\n\n打开文件 `quick_start/ios/Runner.xcodeproj`,然后:\n\n1. 
找到 `TARGETS > Runner ` 在 `General` 的 `Deployment Info` 中修改最低版本为 `iOS 10.0`\n\n### 集成 SDK\n\n在终端命令行,输入命令添加依赖:\n\n```bash\ncd quick_start\nflutter pub add im_flutter_sdk\nflutter pub get\n```\n\n## 添加示例代码\n\n打开 `quick_start/lib/main.dart` 文件,引入头文件:\n\n```dart\nimport 'package:flutter/material.dart';\nimport 'package:im_flutter_sdk/im_flutter_sdk.dart';\n```\n\n修改 `_MyHomePageState` 代码:\n\n```dart\nclass _MyHomePageState extends State<MyHomePage> {\n ScrollController scrollController = ScrollController();\n String _username = \"\";\n String _password = \"\";\n String _messageContent = \"\";\n String _chatId = \"\";\n final List<String> _logText = [];\n\n @override\n void initState() {\n super.initState();\n _initSDK();\n _addChatListener();\n }\n\n @override\n Widget build(BuildContext context) {\n return Scaffold(\n appBar: AppBar(\n title: Text(widget.title),\n ),\n body: Container(\n padding: const EdgeInsets.only(left: 10, right: 10),\n child: Column(\n crossAxisAlignment: CrossAxisAlignment.stretch,\n mainAxisSize: MainAxisSize.max,\n children: [\n TextField(\n decoration: const InputDecoration(hintText: \"Enter username\"),\n onChanged: (username) => _username = username,\n ),\n TextField(\n decoration: const InputDecoration(hintText: \"Enter password\"),\n onChanged: (password) => _password = password,\n ),\n const SizedBox(height: 10),\n Row(\n mainAxisAlignment: MainAxisAlignment.spaceEvenly,\n children: [\n Expanded(\n flex: 1,\n child: TextButton(\n onPressed: _signIn,\n child: const Text(\"SIGN IN\"),\n style: ButtonStyle(\n foregroundColor: MaterialStateProperty.all(Colors.white),\n backgroundColor:\n MaterialStateProperty.all(Colors.lightBlue),\n ),\n ),\n ),\n const SizedBox(width: 10),\n Expanded(\n child: TextButton(\n onPressed: _signOut,\n child: const Text(\"SIGN OUT\"),\n style: ButtonStyle(\n foregroundColor: MaterialStateProperty.all(Colors.white),\n backgroundColor:\n MaterialStateProperty.all(Colors.lightBlue),\n ),\n ),\n ),\n const SizedBox(width: 10),\n Expanded(\n child: TextButton(\n onPressed: _signUp,\n child: const Text(\"SIGN UP\"),\n style: ButtonStyle(\n foregroundColor: MaterialStateProperty.all(Colors.white),\n backgroundColor:\n MaterialStateProperty.all(Colors.lightBlue),\n ),\n ),\n ),\n ],\n ),\n const SizedBox(height: 10),\n TextField(\n decoration: const InputDecoration(\n hintText: \"Enter recipient's user name\"),\n onChanged: (chatId) => _chatId = chatId,\n ),\n TextField(\n decoration: const InputDecoration(hintText: \"Enter message\"),\n onChanged: (msg) => _messageContent = msg,\n ),\n const SizedBox(height: 10),\n TextButton(\n onPressed: _sendMessage,\n child: const Text(\"SEND TEXT\"),\n style: ButtonStyle(\n foregroundColor: MaterialStateProperty.all(Colors.white),\n backgroundColor: MaterialStateProperty.all(Colors.lightBlue),\n ),\n ),\n Flexible(\n child: ListView.builder(\n controller: scrollController,\n itemBuilder: (_, index) {\n return Text(_logText[index]);\n },\n itemCount: _logText.length,\n ),\n ),\n ],\n ),\n ),\n );\n }\n\n void _initSDK() async {\n }\n\n void _addChatListener() {\n }\n\n void _signIn() async {\n }\n\n void _signOut() async {\n }\n\n void _signUp() async {\n }\n\n void _sendMessage() async {\n }\n\n void _addLogToConsole(String log) {\n _logText.add(_timeString + \": \" + log);\n setState(() {\n scrollController.jumpTo(scrollController.position.maxScrollExtent);\n });\n }\n\n String get _timeString {\n return DateTime.now().toString().split(\".\").first;\n }\n}\n```\n\n### 初始化 SDK\n\n在 `_initSDK` 方法中添加 SDK 
初始化:\n\n```dart\n void _initSDK() async {\n EMOptions options = EMOptions(\n appKey: \"<#Your AppKey#>\",\n autoLogin: false,\n );\n await EMClient.getInstance.init(options);\n // 通知sdk ui已经准备好,执行后才会收到`EMChatRoomEventHandler`, `EMContactEventHandler`, `EMGroupEventHandler` 回调。\n await EMClient.getInstance.startCallback();\n }\n```\n\n### 添加 注册环信Id 代码\n\nDemo 中使用 sdk 注册环信id,在真实环境中,可以由你的后台调用环信rest api,\n\n在 `_signUp` 方法中添加登录代码。\n\n```dart\nvoid _signUp() async {\n if (_username.isEmpty || _password.isEmpty) {\n _addLogToConsole(\"username or password is null\");\n return;\n }\n\n try {\n _addLogToConsole(\"begin create account...\");\n await EMClient.getInstance.createAccount(_username, _password);\n _addLogToConsole(\"create account succeed, username: $_username\");\n } on EMError catch (e) {\n _addLogToConsole(\n \"create account failed, code: ${e.code}, desc: ${e.description}\");\n }\n}\n```\n\n\n### 添加登录\n\n在 `_signIn` 方法中添加登录代码。\n\n```dart\nvoid _signIn() async {\n if (_userId.isEmpty || _password.isEmpty) {\n _addLogToConsole(\"username or password is null\");\n return;\n }\n\n try {\n await EMClient.getInstance.login(_userId, _password);\n _addLogToConsole(\"sign in succeed, username: $_userId\");\n } on EMError catch (e) {\n _addLogToConsole(\"sign in failed, e: ${e.code} , ${e.description}\");\n }\n}\n```\n\n### 添加退出\n\n在 `_signOut` 方法中添加退出代码。\n\n```dart\n void _signOut() async {\n try {\n await EMClient.getInstance.logout(true);\n _addLogToConsole(\"sign out succeed\");\n } on EMError catch (e) {\n _addLogToConsole(\n \"sign out failed, code: ${e.code}, desc: ${e.description}\");\n }\n }\n```\n\n\n### 添加发消息\n\n在 `_sendMessage` 方法中添加发消息代码。\n\n```dart\n void _sendMessage() async {\n if (_chatId.isEmpty || _messageContent.isEmpty) {\n _addLogToConsole(\"single chat id or message content is null\");\n return;\n }\n\n var msg = EMMessage.createTxtSendMessage(\n targetId: _chatId,\n content: _messageContent,\n );\n\n EMClient.getInstance.chatManager.sendMessage(msg);\n }\n```\n\n### 添加收消息监听\n\n在 `_addChatListener` 方法中添加代码。\n\n```dart\nvoid _addChatListener() {\n\n // 添加消息状态变更监听\n EMClient.getInstance.chatManager.addMessageEvent(\n // ChatMessageEvent 对应的 key。\n \"UNIQUE_HANDLER_ID\",\n ChatMessageEvent(\n onSuccess: (msgId, msg) {\n _addLogToConsole(\"send message succeed\");\n },\n onProgress: (msgId, progress) {\n _addLogToConsole(\"send message succeed\");\n },\n onError: (msgId, msg, error) {\n _addLogToConsole(\n \"send message failed, code: ${error.code}, desc: ${error.description}\",\n );\n },\n ));\n\n // 添加收消息监听\n EMClient.getInstance.chatManager.addEventHandler(\n // EMChatEventHandle 对应的 key。\n \"UNIQUE_HANDLER_ID\",\n EMChatEventHandler(\n onMessagesReceived: (messages) {\n for (var msg in messages) {\n switch (msg.body.type) {\n case MessageType.TXT:\n {\n EMTextMessageBody body = msg.body as EMTextMessageBody;\n _addLogToConsole(\n \"receive text message: ${body.content}, from: ${msg.from}\",\n );\n }\n break;\n case MessageType.IMAGE:\n {\n _addLogToConsole(\n \"receive image message, from: ${msg.from}\",\n );\n }\n break;\n case MessageType.VIDEO:\n {\n _addLogToConsole(\n \"receive video message, from: ${msg.from}\",\n );\n }\n break;\n case MessageType.LOCATION:\n {\n _addLogToConsole(\n \"receive location message, from: ${msg.from}\",\n );\n }\n break;\n case MessageType.VOICE:\n {\n _addLogToConsole(\n \"receive voice message, from: ${msg.from}\",\n );\n }\n break;\n case MessageType.FILE:\n {\n _addLogToConsole(\n \"receive image message, from: ${msg.from}\",\n );\n 
}\n break;\n case MessageType.CUSTOM:\n {\n _addLogToConsole(\n \"receive custom message, from: ${msg.from}\",\n );\n }\n break;\n case MessageType.CMD:\n {\n // 当前回调中不会有 CMD 类型消息,CMD 类型消息通过 [EMChatEventHandler.onCmdMessagesReceived] 回调接收\n }\n break;\n }\n }\n },\n ),\n );\n}\n```\n\n### 移除消息监听\n\n在 `dispose` 方法中添加代码移除监听:\n\n```dart\n@override\nvoid dispose() {\n EMClient.getInstance.chatManager.removeMessageEvent(\"UNIQUE_HANDLER_ID\");\n EMClient.getInstance.chatManager.removeEventHandler(\"UNIQUE_HANDLER_ID\");\n super.dispose();\n}\n```\n\n## 运行项目\n\n以 iOS 为例,首先打开模拟器,之后在终端输入。\n\n```bash\nflutter run\n```\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 35, "blob_id": "7334b562dfe5546df7e75c587a86acdf03e0eabb", "content_id": "eed6e197b6a0b176aaa39411c102db4c861f0656", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Gradle", "length_bytes": 36, "license_type": "permissive", "max_line_length": 35, "num_lines": 1, "path": "/android/settings.gradle", "repo_name": "easemob/im_flutter_sdk", "src_encoding": "UTF-8", "text": "rootProject.name = 'im_flutter_sdk'\n" }, { "alpha_fraction": 0.5719237327575684, "alphanum_fraction": 0.574523389339447, "avg_line_length": 36.83606719970703, "blob_id": "e0266331b74165b1ffb8d1c67d541c4e9a2f035b", "content_id": "e7a07a42fca180fa32d03dfa58250f941f6c5a21", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2502, "license_type": "permissive", "max_line_length": 111, "num_lines": 61, "path": "/common_replace.py", "repo_name": "easemob/im_flutter_sdk", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport shutil\nimport re\nimport argparse\n\ndef parse_args():\n \"\"\"\n 解析命令行参数\n \"\"\"\n parser = argparse.ArgumentParser(description='过滤Dart文件中的中文或英文注释',\n add_help=True)\n parser.add_argument('language', choices=['chinese', 'english'], help='要过滤的注释语言')\n parser.add_argument('src_folder', help='要过滤的源文件夹')\n parser.add_argument('dst_folder', help='要输出过滤后文件的目标文件夹')\n parser.add_argument('--file-type', '-t', default='.dart', help='要过滤的文件类型(默认为.dart)')\n return parser.parse_args()\n\ndef filter_folder(src_folder, dst_folder, file_type, language):\n \"\"\"\n 过滤指定文件夹中的文件,并输出到目标文件夹\n \"\"\"\n for root, dirs, files in os.walk(src_folder):\n for file in files:\n if file.endswith(file_type):\n src_path = os.path.join(root, file)\n dst_path = os.path.join(dst_folder, os.path.relpath(src_path, start=src_folder))\n os.makedirs(os.path.dirname(dst_path), exist_ok=True)\n if not os.path.basename(src_path) == \".DS_Store\":\n shutil.copy2(src_path, dst_path)\n filter_comments(dst_path, language)\n else:\n src_path = os.path.join(root, file)\n dst_path = os.path.join(dst_folder, os.path.relpath(src_path, start=src_folder))\n os.makedirs(os.path.dirname(dst_path), exist_ok=True)\n if not os.path.basename(src_path) == \".DS_Store\":\n shutil.copy2(src_path, dst_path)\n\ndef filter_comments(file_path, language):\n \"\"\"\n 过滤文件中的中文或英文注释\n \"\"\"\n with open(file_path, 'r', encoding='utf-8') as f:\n content = f.read()\n if language == 'chinese':\n pattern = r'~chinese[\\s\\S]*?~end'\n elif language == 'english':\n pattern = r'~english[\\s\\S]*?~end'\n else:\n return\n filtered_content = re.sub(pattern, '', content)\n filtered_content = filtered_content.replace('~chinese', '').replace('~english', '').replace('~end', '')\n \n with open(file_path, 'w', encoding='utf-8') as f:\n 
f.write(filtered_content)\n\nif __name__ == '__main__':\n args = parse_args()\n filter_folder(args.src_folder, args.dst_folder, args.file_type, args.language)\n" }, { "alpha_fraction": 0.7875000238418579, "alphanum_fraction": 0.7875000238418579, "avg_line_length": 39, "blob_id": "ee4cfaf796b4c11656cae605ed10623a7a32b0f5", "content_id": "85b307f69745704ebb493aede5b0ecb6bfee01bc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 80, "license_type": "permissive", "max_line_length": 62, "num_lines": 2, "path": "/release.sh", "repo_name": "easemob/im_flutter_sdk", "src_encoding": "UTF-8", "text": "flutter format .\nflutter packages pub publish --server=https://pub.dartlang.org\n" }, { "alpha_fraction": 0.5906110405921936, "alphanum_fraction": 0.5919223427772522, "avg_line_length": 45.85160827636719, "blob_id": "14423c2ba19d8232c8d8fd6e351a63c7b98a5e70", "content_id": "7f2b84774bf9bf3eb6478fc228d20e379440af70", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 49569, "license_type": "permissive", "max_line_length": 187, "num_lines": 1058, "path": "/android/src/main/java/com/easemob/im_flutter_sdk/EMChatManagerWrapper.java", "repo_name": "easemob/im_flutter_sdk", "src_encoding": "UTF-8", "text": "package com.easemob.im_flutter_sdk;\n\nimport com.hyphenate.EMConversationListener;\nimport com.hyphenate.EMMessageListener;\nimport com.hyphenate.chat.EMClient;\nimport com.hyphenate.chat.*;\nimport com.hyphenate.chat.EMConversation.EMSearchDirection;\nimport com.hyphenate.chat.EMConversation.EMConversationType;\n\nimport com.hyphenate.chat.EMCursorResult;\nimport com.hyphenate.chat.EMMessage;\nimport com.hyphenate.exceptions.HyphenateException;\n\nimport java.util.ArrayList;\n\nimport org.json.JSONArray;\nimport org.json.JSONException;\nimport org.json.JSONObject;\n\nimport java.util.Collections;\nimport java.util.Comparator;\nimport java.util.HashMap;\nimport java.util.List;\nimport java.util.Map;\n\nimport io.flutter.embedding.engine.plugins.FlutterPlugin;\nimport io.flutter.plugin.common.JSONMethodCodec;\nimport io.flutter.plugin.common.MethodCall;\nimport io.flutter.plugin.common.MethodChannel;\nimport io.flutter.plugin.common.MethodChannel.MethodCallHandler;\nimport io.flutter.plugin.common.MethodChannel.Result;\n\n\npublic class EMChatManagerWrapper extends EMWrapper implements MethodCallHandler {\n\n private MethodChannel messageChannel;\n private EMMessageListener messageListener;\n private EMConversationListener conversationListener;\n\n\n EMChatManagerWrapper(FlutterPlugin.FlutterPluginBinding flutterPluginBinding, String channelName) {\n super(flutterPluginBinding, channelName);\n messageChannel = new MethodChannel(flutterPluginBinding.getBinaryMessenger(), \"com.chat.im/chat_message\", JSONMethodCodec.INSTANCE);\n registerEaseListener();\n }\n\n\n @Override\n public void onMethodCall(MethodCall call, Result result) {\n JSONObject param = (JSONObject) call.arguments;\n try {\n if (EMSDKMethod.sendMessage.equals(call.method)) {\n sendMessage(param, call.method, result);\n } else if (EMSDKMethod.resendMessage.equals(call.method)) {\n resendMessage(param, call.method, result);\n } else if (EMSDKMethod.ackMessageRead.equals(call.method)) {\n ackMessageRead(param, call.method, result);\n } else if (EMSDKMethod.ackGroupMessageRead.equals(call.method)) {\n ackGroupMessageRead(param, call.method, result);\n } else if (EMSDKMethod.ackConversationRead.equals(call.method)) {\n 
ackConversationRead(param, call.method, result);\n } else if (EMSDKMethod.recallMessage.equals(call.method)) {\n recallMessage(param, call.method, result);\n } else if (EMSDKMethod.getConversation.equals(call.method)) {\n getConversation(param, call.method, result);\n } else if (EMSDKMethod.getThreadConversation.equals(call.method)) {\n getThreadConversation(param, call.method, result);\n } else if (EMSDKMethod.markAllChatMsgAsRead.equals(call.method)) {\n markAllChatMsgAsRead(param, call.method, result);\n } else if (EMSDKMethod.getUnreadMessageCount.equals(call.method)) {\n getUnreadMessageCount(param, call.method, result);\n } else if (EMSDKMethod.updateChatMessage.equals(call.method)) {\n updateChatMessage(param, call.method, result);\n } else if (EMSDKMethod.downloadAttachment.equals(call.method)) {\n downloadAttachment(param, call.method, result);\n } else if (EMSDKMethod.downloadThumbnail.equals(call.method)) {\n downloadThumbnail(param, call.method, result);\n } else if (EMSDKMethod.importMessages.equals(call.method)) {\n importMessages(param, call.method, result);\n } else if (EMSDKMethod.loadAllConversations.equals(call.method)) {\n loadAllConversations(param, call.method, result);\n } else if (EMSDKMethod.getConversationsFromServer.equals(call.method)) {\n getConversationsFromServer(param, call.method, result);\n } else if (EMSDKMethod.deleteConversation.equals(call.method)) {\n deleteConversation(param, call.method, result);\n } else if (EMSDKMethod.fetchHistoryMessages.equals(call.method)) {\n fetchHistoryMessages(param, call.method, result);\n } else if (EMSDKMethod.fetchHistoryMessagesByOptions.equals(call.method)) {\n fetchHistoryMessagesByOptions(param, call.method, result);\n } else if (EMSDKMethod.searchChatMsgFromDB.equals(call.method)) {\n searchChatMsgFromDB(param, call.method, result);\n } else if (EMSDKMethod.getMessage.equals(call.method)) {\n getMessage(param, call.method, result);\n } else if (EMSDKMethod.asyncFetchGroupAcks.equals(call.method)){\n asyncFetchGroupMessageAckFromServer(param, call.method, result);\n } else if (EMSDKMethod.deleteRemoteConversation.equals(call.method)){\n deleteRemoteConversation(param, call.method, result);\n } else if (EMSDKMethod.deleteMessagesBeforeTimestamp.equals(call.method)) {\n deleteMessagesBefore(param, call.method, result);\n } else if (EMSDKMethod.translateMessage.equals(call.method)) {\n translateMessage(param, call.method, result);\n } else if (EMSDKMethod.fetchSupportedLanguages.equals(call.method)) {\n fetchSupportedLanguages(param, call.method, result);\n } else if (EMSDKMethod.addReaction.equals(call.method)) {\n addReaction(param, call.method, result);\n } else if (EMSDKMethod.removeReaction.equals(call.method)) {\n removeReaction(param, call.method, result);\n } else if (EMSDKMethod.fetchReactionList.equals(call.method)) {\n fetchReactionList(param, call.method, result);\n } else if (EMSDKMethod.fetchReactionDetail.equals(call.method)) {\n fetchReactionDetail(param, call.method, result);\n } else if (EMSDKMethod.reportMessage.equals(call.method)) {\n reportMessage(param, call.method, result);\n } else if (EMSDKMethod.fetchConversationsFromServerWithPage.equals(call.method)) {\n getConversationsFromServerWithPage(param, call.method, result);\n } else if (EMSDKMethod.removeMessagesFromServerWithMsgIds.equals(call.method)) {\n removeMessagesFromServerWithMsgIds(param, call.method, result);\n } else if (EMSDKMethod.removeMessagesFromServerWithTs.equals(call.method)) {\n removeMessagesFromServerWithTs(param, 
call.method, result);\n } else if (EMSDKMethod.getConversationsFromServerWithCursor.equals(call.method)) {\n getConversationsFromServerWithCursor(param, call.method, result);\n } else if (EMSDKMethod.getPinnedConversationsFromServerWithCursor.equals(call.method)) {\n getPinnedConversationsFromServerWithCursor(param, call.method, result);\n } else if (EMSDKMethod.pinConversation.equals(call.method)) {\n pinConversation(param, call.method, result);\n } else if (EMSDKMethod.modifyMessage.equals(call.method)) {\n modifyMessage(param, call.method, result);\n } else if (EMSDKMethod.downloadAndParseCombineMessage.equals(call.method)) {\n downloadAndParseCombineMessage(param, call.method, result);\n }\n else {\n super.onMethodCall(call, result);\n }\n } catch (JSONException ignored) {\n\n }\n }\n\n private void sendMessage(JSONObject param, String channelName, Result result) throws JSONException {\n final EMMessage msg = EMMessageHelper.fromJson(param);\n final String localId = msg.getMsgId();\n msg.setMessageStatusCallback(new EMWrapperCallBack(result, channelName, null) {\n @Override\n public void onSuccess() {\n post(() -> {\n Map<String, Object> map = new HashMap<>();\n map.put(\"message\", EMMessageHelper.toJson(msg));\n map.put(\"localId\", localId);\n messageChannel.invokeMethod(EMSDKMethod.onMessageSuccess, map);\n });\n }\n\n @Override\n public void onProgress(int progress, String status) {\n post(() -> {\n Map<String, Object> map = new HashMap<>();\n map.put(\"progress\", progress);\n map.put(\"localId\", localId);\n messageChannel.invokeMethod(EMSDKMethod.onMessageProgressUpdate, map);\n });\n }\n\n @Override\n public void onError(int code, String desc) {\n Map<String, Object> data = new HashMap<>();\n data.put(\"code\", code);\n data.put(\"description\", desc);\n post(() -> {\n Map<String, Object> map = new HashMap<>();\n map.put(\"message\", EMMessageHelper.toJson(msg));\n map.put(\"localId\", localId);\n map.put(\"error\", data);\n messageChannel.invokeMethod(EMSDKMethod.onMessageError, map);\n });\n }\n });\n asyncRunnable(() -> {\n EMClient.getInstance().chatManager().sendMessage(msg);\n onSuccess(result, channelName, EMMessageHelper.toJson(msg));\n });\n }\n\n private void resendMessage(JSONObject param, String channelName, Result result) throws JSONException {\n EMMessage tempMsg = EMMessageHelper.fromJson(param);\n EMMessage msg = EMClient.getInstance().chatManager().getMessage(tempMsg.getMsgId());\n if (msg == null) {\n msg = tempMsg;\n }\n msg.setStatus(EMMessage.Status.CREATE);\n EMMessage finalMsg = msg;\n final String localId = finalMsg.getMsgId();\n finalMsg.setMessageStatusCallback(new EMWrapperCallBack(result, channelName, null) {\n @Override\n public void onSuccess() {\n post(() -> {\n Map<String, Object> map = new HashMap<>();\n map.put(\"message\", EMMessageHelper.toJson(finalMsg));\n map.put(\"localId\", localId);\n messageChannel.invokeMethod(EMSDKMethod.onMessageSuccess, map);\n });\n }\n\n @Override\n public void onProgress(int progress, String status) {\n post(() -> {\n Map<String, Object> map = new HashMap<>();\n map.put(\"progress\", progress);\n map.put(\"localId\", localId);\n messageChannel.invokeMethod(EMSDKMethod.onMessageProgressUpdate, map);\n });\n }\n\n\n @Override\n public void onError(int code, String desc) {\n Map<String, Object> data = new HashMap<>();\n data.put(\"code\", code);\n data.put(\"description\", desc);\n post(() -> {\n Map<String, Object> map = new HashMap<>();\n map.put(\"message\", EMMessageHelper.toJson(finalMsg));\n 
map.put(\"localId\", localId);\n map.put(\"error\", data);\n messageChannel.invokeMethod(EMSDKMethod.onMessageError, map);\n });\n }\n });\n EMClient.getInstance().chatManager().sendMessage(msg);\n asyncRunnable(() -> {\n onSuccess(result, channelName, EMMessageHelper.toJson(finalMsg));\n });\n }\n\n private void ackMessageRead(JSONObject param, String channelName, Result result) throws JSONException {\n String msgId = param.getString(\"msg_id\");\n String to = param.getString(\"to\");\n\n asyncRunnable(() -> {\n try {\n EMClient.getInstance().chatManager().ackMessageRead(to, msgId);\n onSuccess(result, channelName, true);\n } catch (HyphenateException e) {\n onError(result, e);\n }\n });\n }\n\n private void ackGroupMessageRead(JSONObject param, String channelName, Result result) throws JSONException {\n String msgId = param.getString(\"msg_id\");\n String to = param.getString(\"group_id\");\n String content = null;\n if(param.has(\"content\")) {\n content = param.getString(\"content\");\n }\n String finalContent = content;\n asyncRunnable(()->{\n try {\n EMClient.getInstance().chatManager().ackGroupMessageRead(to, msgId, finalContent);\n onSuccess(result, channelName, true);\n } catch (HyphenateException e) {\n onError(result, e);\n }\n });\n }\n\n private void ackConversationRead(JSONObject param, String channelName, Result result) throws JSONException {\n String conversationId = param.getString(\"convId\");\n asyncRunnable(() -> {\n try {\n EMClient.getInstance().chatManager().ackConversationRead(conversationId);\n onSuccess(result, channelName, true);\n } catch (HyphenateException e) {\n onError(result, e);\n }\n });\n }\n\n private void recallMessage(JSONObject param, String channelName, Result result) throws JSONException {\n String msgId = param.getString(\"msg_id\");\n\n asyncRunnable(() -> {\n try {\n EMMessage msg = EMClient.getInstance().chatManager().getMessage(msgId);\n if (msg != null) {\n EMClient.getInstance().chatManager().recallMessage(msg);\n onSuccess(result, channelName, true);\n }else {\n onError(result, new HyphenateException(500, \"The message was not found\"));\n }\n } catch (HyphenateException e) {\n onError(result, e);\n }\n });\n }\n\n private void getMessage(JSONObject param, String channelName, Result result) throws JSONException {\n String msgId = param.getString(\"msg_id\");\n\n asyncRunnable(() -> {\n EMMessage msg = EMClient.getInstance().chatManager().getMessage(msgId);\n if(msg == null) {\n onSuccess(result, channelName, null);\n }else {\n onSuccess(result, channelName, EMMessageHelper.toJson(msg));\n }\n });\n }\n\n private void getConversation(JSONObject param, String channelName, Result result) throws JSONException {\n String conId = param.getString(\"convId\");\n boolean createIfNeed = true;\n if (param.has(\"createIfNeed\")) {\n createIfNeed = param.getBoolean(\"createIfNeed\");\n }\n\n EMConversationType type = EMConversationHelper.typeFromInt(param.getInt(\"type\"));\n\n boolean finalCreateIfNeed = createIfNeed;\n asyncRunnable(() -> {\n EMConversation conversation = EMClient.getInstance().chatManager().getConversation(conId, type, finalCreateIfNeed);\n onSuccess(result, channelName, conversation != null ? 
EMConversationHelper.toJson(conversation) : null);\n });\n }\n\n private void getThreadConversation(JSONObject param, String channelName, Result result) throws JSONException {\n String conId = param.getString(\"convId\");\n asyncRunnable(() -> {\n EMConversation conversation = EMClient.getInstance().chatManager().getConversation(conId, EMConversationType.GroupChat, true, true);\n onSuccess(result, channelName, conversation != null ? EMConversationHelper.toJson(conversation) : null);\n });\n }\n\n private void markAllChatMsgAsRead(JSONObject param, String channelName, Result result) throws JSONException {\n EMClient.getInstance().chatManager().markAllConversationsAsRead();\n\n asyncRunnable(() -> {\n onSuccess(result, channelName, true);\n });\n }\n\n private void getUnreadMessageCount(JSONObject param, String channelName, Result result) throws JSONException {\n int count = EMClient.getInstance().chatManager().getUnreadMessageCount();\n\n asyncRunnable(() -> {\n onSuccess(result, channelName, count);\n });\n }\n\n private void getConversationsFromServerWithPage(JSONObject param, String channelName, Result result) throws JSONException {\n int pageNum = param.getInt(\"pageNum\");\n int pageSize = param.getInt(\"pageSize\");\n EMValueWrapperCallBack<Map<String, EMConversation>> callBack = new EMValueWrapperCallBack<Map<String, EMConversation>>(result,\n channelName) {\n @Override\n public void onSuccess(Map<String, EMConversation> object) {\n ArrayList<EMConversation>list = new ArrayList<>(object.values());\n asyncRunnable(() -> {\n boolean retry = false;\n List<Map> conversations = new ArrayList<>();\n do{\n try{\n retry = false;\n Collections.sort(list, new Comparator<EMConversation>() {\n @Override\n public int compare(EMConversation o1, EMConversation o2) {\n if (o1 == null && o2 == null) {\n return 0;\n }\n if (o1.getLastMessage() == null) {\n return 1;\n }\n\n if (o2.getLastMessage() == null) {\n return -1;\n }\n\n if (o1.getLastMessage().getMsgTime() == o2.getLastMessage().getMsgTime()) {\n return 0;\n }\n\n return o2.getLastMessage().getMsgTime() - o1.getLastMessage().getMsgTime() > 0 ? 
1 : -1;\n }\n });\n for (EMConversation conversation : list) {\n conversations.add(EMConversationHelper.toJson(conversation));\n }\n\n }catch(IllegalArgumentException e) {\n retry = true;\n }\n }while (retry);\n updateObject(conversations);\n });\n }\n };\n EMClient.getInstance().chatManager().asyncFetchConversationsFromServer(pageNum, pageSize, callBack);\n }\n\n private void removeMessagesFromServerWithMsgIds(JSONObject params, String channelName, Result result) throws JSONException {\n String conversationId = params.getString(\"convId\");\n EMConversation.EMConversationType type = EMConversationHelper.typeFromInt(params.getInt(\"type\"));\n EMConversation conversation = EMClient.getInstance().chatManager().getConversation(conversationId, type, true);\n\n JSONArray jsonArray = params.getJSONArray(\"msgIds\");\n\n ArrayList<String> msgIds = new ArrayList<>();\n for (int i = 0; i < jsonArray.length(); i++) {\n msgIds.add((String) jsonArray.get(i));\n }\n\n conversation.removeMessagesFromServer(msgIds, new EMWrapperCallBack(result, channelName, null));\n }\n\n private void removeMessagesFromServerWithTs(JSONObject params, String channelName, Result result) throws JSONException {\n String conversationId = params.getString(\"convId\");\n EMConversation.EMConversationType type = EMConversationHelper.typeFromInt(params.getInt(\"type\"));\n EMConversation conversation = EMClient.getInstance().chatManager().getConversation(conversationId, type, true);\n long timestamp = 0;\n if(params.has(\"timestamp\")) {\n timestamp = params.getLong(\"timestamp\");\n }\n conversation.removeMessagesFromServer(timestamp, new EMWrapperCallBack(result, channelName, null));\n }\n\n private void updateChatMessage(JSONObject param, String channelName, Result result) throws JSONException {\n EMMessage msg = EMMessageHelper.fromJson(param.getJSONObject(\"message\"));\n\n asyncRunnable(() -> {\n EMClient.getInstance().chatManager().updateMessage(msg);\n onSuccess(result, channelName, EMMessageHelper.toJson(msg));\n });\n }\n\n private void importMessages(JSONObject param, String channelName, Result result) throws JSONException {\n JSONArray ary = param.getJSONArray(\"messages\");\n List<EMMessage> messages = new ArrayList<>();\n for (int i = 0; i < ary.length(); i++) {\n JSONObject obj = ary.getJSONObject(i);\n messages.add(EMMessageHelper.fromJson(obj));\n }\n\n asyncRunnable(() -> {\n EMClient.getInstance().chatManager().importMessages(messages);\n onSuccess(result, channelName, true);\n });\n }\n\n\n private void downloadAttachment(JSONObject param, String channelName, Result result) throws JSONException {\n EMMessage tempMsg = EMMessageHelper.fromJson(param.getJSONObject(\"message\"));\n final EMMessage msg = EMClient.getInstance().chatManager().getMessage(tempMsg.getMsgId());\n msg.setMessageStatusCallback(new EMWrapperCallBack(result, channelName, null) {\n @Override\n public void onSuccess() {\n post(() -> {\n Map<String, Object> map = new HashMap<>();\n map.put(\"message\", updateDownloadStatus(EMFileMessageBody.EMDownloadStatus.SUCCESSED, msg, false));\n map.put(\"localId\", msg.getMsgId());\n messageChannel.invokeMethod(EMSDKMethod.onMessageSuccess, map);\n });\n }\n\n @Override\n public void onProgress(int progress, String status) {\n post(() -> {\n Map<String, Object> map = new HashMap<>();\n map.put(\"progress\", progress);\n map.put(\"localId\", msg.getMsgId());\n messageChannel.invokeMethod(EMSDKMethod.onMessageProgressUpdate, map);\n });\n }\n\n @Override\n public void onError(int code, String desc) {\n 
Map<String, Object> data = new HashMap<>();\n data.put(\"code\", code);\n data.put(\"description\", desc);\n post(() -> {\n Map<String, Object> map = new HashMap<>();\n map.put(\"message\", updateDownloadStatus(EMFileMessageBody.EMDownloadStatus.FAILED, msg, false));\n map.put(\"localId\", msg.getMsgId());\n map.put(\"error\", data);\n messageChannel.invokeMethod(EMSDKMethod.onMessageError, map);\n });\n }\n });\n asyncRunnable(() -> {\n EMClient.getInstance().chatManager().downloadAttachment(msg);\n onSuccess(result, channelName, updateDownloadStatus(EMFileMessageBody.EMDownloadStatus.DOWNLOADING, msg, false));\n });\n }\n\n private void downloadThumbnail(JSONObject param, String channelName, Result result) throws JSONException {\n EMMessage tempMsg = EMMessageHelper.fromJson(param.getJSONObject(\"message\"));\n final EMMessage msg = EMClient.getInstance().chatManager().getMessage(tempMsg.getMsgId());\n msg.setMessageStatusCallback(new EMWrapperCallBack(result, channelName, null) {\n @Override\n public void onSuccess() {\n post(() -> {\n Map<String, Object> map = new HashMap<>();\n map.put(\"message\", updateDownloadStatus(EMFileMessageBody.EMDownloadStatus.SUCCESSED, msg, true));\n map.put(\"localId\", msg.getMsgId());\n messageChannel.invokeMethod(EMSDKMethod.onMessageSuccess, map);\n });\n }\n\n @Override\n public void onProgress(int progress, String status) {\n post(() -> {\n Map<String, Object> map = new HashMap<>();\n map.put(\"progress\", progress);\n map.put(\"localId\", msg.getMsgId());\n messageChannel.invokeMethod(EMSDKMethod.onMessageProgressUpdate, map);\n });\n }\n\n @Override\n public void onError(int code, String desc) {\n Map<String, Object> data = new HashMap<>();\n data.put(\"code\", code);\n data.put(\"description\", desc);\n post(() -> {\n Map<String, Object> map = new HashMap<>();\n map.put(\"message\", updateDownloadStatus(EMFileMessageBody.EMDownloadStatus.FAILED, msg, true));\n map.put(\"localId\", msg.getMsgId());\n map.put(\"error\", data);\n messageChannel.invokeMethod(EMSDKMethod.onMessageError, map);\n });\n }\n });\n asyncRunnable(() -> {\n EMClient.getInstance().chatManager().downloadThumbnail(msg);\n onSuccess(result, channelName, updateDownloadStatus(EMFileMessageBody.EMDownloadStatus.DOWNLOADING, msg, true));\n });\n }\n\n private Map<String, Object> updateDownloadStatus(EMFileMessageBody.EMDownloadStatus downloadStatus, EMMessage msg, boolean isThumbnail) {\n boolean canUpdate = false;\n switch (msg.getType()) {\n case FILE:\n case VOICE: {\n if (isThumbnail) {\n break;\n }\n }\n case IMAGE:\n case VIDEO:\n {\n canUpdate = true;\n }\n break;\n default:\n break;\n }\n if (canUpdate) {\n EMMessageBody body = msg.getBody();\n if (msg.getType() == EMMessage.Type.FILE) {\n EMFileMessageBody tmpBody = (EMFileMessageBody) body;\n tmpBody.setDownloadStatus(downloadStatus);\n body = tmpBody;\n }else if (msg.getType() == EMMessage.Type.VOICE) {\n EMVoiceMessageBody tmpBody = (EMVoiceMessageBody) body;\n tmpBody.setDownloadStatus(downloadStatus);\n body = tmpBody;\n }else if (msg.getType() == EMMessage.Type.IMAGE) {\n EMImageMessageBody tmpBody = (EMImageMessageBody) body;\n if (isThumbnail) {\n // android not support now.\n // tmpBody.setThumbnailDownloadStatus(downloadStatus);\n }else {\n tmpBody.setDownloadStatus(downloadStatus);\n }\n\n body = tmpBody;\n }else if (msg.getType() == EMMessage.Type.VIDEO) {\n EMVideoMessageBody tmpBody = (EMVideoMessageBody) body;\n if (isThumbnail) {\n tmpBody.setThumbnailDownloadStatus(downloadStatus);\n }else {\n 
tmpBody.setDownloadStatus(downloadStatus);\n }\n\n body = tmpBody;\n }\n\n msg.setBody(body);\n }\n return EMMessageHelper.toJson(msg);\n }\n\n private void loadAllConversations(JSONObject param, String channelName, Result result) throws JSONException {\n if (EMClient.getInstance().getCurrentUser() == null || EMClient.getInstance().getCurrentUser().length() == 0) {\n onSuccess(result, channelName, new ArrayList<>());\n return;\n }\n List<EMConversation> list = new ArrayList<>(EMClient.getInstance().chatManager().getAllConversations().values());\n asyncRunnable(() -> {\n boolean retry = false;\n List<Map> conversations = new ArrayList<>();\n do{\n try{\n retry = false;\n Collections.sort(list, new Comparator<EMConversation>() {\n @Override\n public int compare(EMConversation o1, EMConversation o2) {\n if (o1 == null && o2 == null) {\n return 0;\n }\n if (o1.getLastMessage() == null) {\n return 1;\n }\n\n if (o2.getLastMessage() == null) {\n return -1;\n }\n\n if (o1.getLastMessage().getMsgTime() == o2.getLastMessage().getMsgTime()) {\n return 0;\n }\n\n return o2.getLastMessage().getMsgTime() - o1.getLastMessage().getMsgTime() > 0 ? 1 : -1;\n }\n });\n for (EMConversation conversation : list) {\n conversations.add(EMConversationHelper.toJson(conversation));\n }\n }catch(IllegalArgumentException e) {\n retry = true;\n }\n }while (retry);\n onSuccess(result, channelName, conversations);\n });\n }\n\n private void getConversationsFromServer(JSONObject param, String channelName, Result result) throws JSONException {\n asyncRunnable(() -> {\n try {\n List<EMConversation> list = new ArrayList<>(\n EMClient.getInstance().chatManager().fetchConversationsFromServer().values());\n Collections.sort(list, new Comparator<EMConversation>() {\n @Override\n public int compare(EMConversation o1, EMConversation o2) {\n if (o1.getLastMessage() == null) {\n return 1;\n }\n\n if (o2.getLastMessage() == null) {\n return -1;\n }\n\n if (o1.getLastMessage().getMsgTime() == o2.getLastMessage().getMsgTime()) {\n return 0;\n }\n\n return o2.getLastMessage().getMsgTime() - o1.getLastMessage().getMsgTime() > 0 ? 1 : -1;\n }\n });\n List<Map> conversations = new ArrayList<>();\n for (EMConversation conversation : list) {\n conversations.add(EMConversationHelper.toJson(conversation));\n }\n onSuccess(result, channelName, conversations);\n } catch (HyphenateException e) {\n onError(result, e);\n }\n });\n }\n\n private void deleteConversation(JSONObject param, String channelName, Result result) throws JSONException {\n String conId = param.getString(\"convId\");\n boolean isDelete = param.getBoolean(\"deleteMessages\");\n asyncRunnable(() -> {\n boolean ret = EMClient.getInstance().chatManager().deleteConversation(conId, isDelete);\n onSuccess(result, channelName, ret);\n });\n }\n\n private void fetchHistoryMessages(JSONObject param, String channelName, Result result) throws JSONException {\n String conId = param.getString(\"convId\");\n EMConversationType type = EMConversationHelper.typeFromInt(param.getInt(\"type\"));\n int pageSize = param.getInt(\"pageSize\");\n String startMsgId = param.getString(\"startMsgId\");\n EMSearchDirection direction = param.optInt(\"direction\") == 0 ? 
EMSearchDirection.UP : EMSearchDirection.DOWN;\n asyncRunnable(() -> {\n try {\n EMCursorResult<EMMessage> cursorResult = EMClient.getInstance().chatManager().fetchHistoryMessages(conId,\n type, pageSize, startMsgId, direction);\n onSuccess(result, channelName, EMCursorResultHelper.toJson(cursorResult));\n } catch (HyphenateException e) {\n onError(result, e);\n }\n });\n }\n\n private void fetchHistoryMessagesByOptions(JSONObject param, String channelName, Result result) throws JSONException {\n String conId = param.getString(\"convId\");\n EMConversationType type = EMConversationHelper.typeFromInt(param.getInt(\"type\"));\n int pageSize = param.getInt(\"pageSize\");\n String cursor = null;\n if (param.has(\"cursor\")) {\n cursor = param.getString(\"cursor\");\n }\n EMFetchMessageOption option = null;\n if (param.has(\"options\")) {\n option = FetchHistoryOptionsHelper.fromJson(param.getJSONObject(\"options\"));\n }\n\n EMValueWrapperCallBack<EMCursorResult<EMMessage>> callBack = new EMValueWrapperCallBack<EMCursorResult<EMMessage>>(result,\n channelName) {\n @Override\n public void onSuccess(EMCursorResult<EMMessage> result) {\n updateObject(EMCursorResultHelper.toJson(result));\n }\n };\n\n EMClient.getInstance().chatManager().asyncFetchHistoryMessages(conId, type, pageSize, cursor, option, callBack);\n }\n\n\n private void searchChatMsgFromDB(JSONObject param, String channelName, Result result) throws JSONException {\n String keywords = param.getString(\"keywords\");\n long timestamp = param.getLong(\"timestamp\");\n int count = param.getInt(\"maxCount\");\n String from = param.getString(\"from\");\n EMSearchDirection direction = searchDirectionFromString(param.getString(\"direction\"));\n asyncRunnable(() -> {\n List<EMMessage> msgList = EMClient.getInstance().chatManager().searchMsgFromDB(keywords, timestamp, count,\n from, direction);\n List<Map> messages = new ArrayList<>();\n for (EMMessage msg : msgList) {\n messages.add(EMMessageHelper.toJson(msg));\n }\n onSuccess(result, channelName, messages);\n });\n }\n\n\n private void asyncFetchGroupMessageAckFromServer(JSONObject param, String channelName, Result result) throws JSONException {\n String msgId = param.getString(\"msg_id\");\n String ackId = null;\n if (param.has(\"ack_id\")){\n ackId = param.getString(\"ack_id\");\n }\n int pageSize = param.getInt(\"pageSize\");\n\n EMValueWrapperCallBack<EMCursorResult<EMGroupReadAck>> callBack = new EMValueWrapperCallBack<EMCursorResult<EMGroupReadAck>>(result,\n channelName) {\n @Override\n public void onSuccess(EMCursorResult<EMGroupReadAck> result) {\n updateObject(EMCursorResultHelper.toJson(result));\n }\n };\n\n EMClient.getInstance().chatManager().asyncFetchGroupReadAcks(msgId, pageSize, ackId, callBack);\n }\n\n\n private void deleteRemoteConversation(JSONObject param, String channelName, Result result) throws JSONException {\n String conversationId = param.getString(\"conversationId\");\n EMConversationType type = typeFromInt(param.getInt(\"conversationType\"));\n boolean isDeleteRemoteMessage = param.getBoolean(\"isDeleteRemoteMessage\");\n EMClient.getInstance().chatManager().deleteConversationFromServer(conversationId, type, isDeleteRemoteMessage, new EMWrapperCallBack(result, channelName, null));\n }\n\n private void deleteMessagesBefore(JSONObject param, String channelName, Result result) throws JSONException {\n long timestamp = param.getLong(\"timestamp\");\n EMClient.getInstance().chatManager().deleteMessagesBeforeTimestamp(timestamp, new EMWrapperCallBack(result, 
channelName, null));\n }\n\n private void translateMessage(JSONObject param, String channelName, Result result) throws JSONException {\n EMMessage msg = EMMessageHelper.fromJson(param.getJSONObject(\"message\"));\n List<String> list = new ArrayList<String>();\n if (param.has(\"languages\")){\n JSONArray array = param.getJSONArray(\"languages\");\n for (int i = 0; i < array.length(); i++) {\n list.add(array.getString(i));\n }\n }\n EMClient.getInstance().chatManager().translateMessage(msg, list, new EMValueWrapperCallBack<EMMessage>(result, channelName){\n @Override\n public void onSuccess(EMMessage object) {\n updateObject(EMMessageHelper.toJson(object));\n }\n });\n }\n\n private void fetchSupportedLanguages(JSONObject param, String channelName, Result result) throws JSONException {\n EMClient.getInstance().chatManager().fetchSupportLanguages(new EMValueWrapperCallBack<List<EMLanguage>>(result, channelName){\n @Override\n public void onSuccess(List<EMLanguage> object) {\n List<Map> list = new ArrayList<>();\n for (EMLanguage language : object) {\n list.add(EMLanguageHelper.toJson(language));\n }\n updateObject(list);\n }\n });\n }\n\n private void addReaction(JSONObject param, String channelName, Result result) throws JSONException {\n String reaction = param.getString(\"reaction\");\n String msgId = param.getString(\"msgId\");\n EMClient.getInstance().chatManager().asyncAddReaction(msgId, reaction, new EMWrapperCallBack(result, channelName, null));\n }\n\n private void removeReaction(JSONObject param, String channelName, Result result) throws JSONException {\n String reaction = param.getString(\"reaction\");\n String msgId = param.getString(\"msgId\");\n EMClient.getInstance().chatManager().asyncRemoveReaction(msgId, reaction, new EMWrapperCallBack(result, channelName, null));\n }\n\n private void fetchReactionList(JSONObject param, String channelName, Result result) throws JSONException {\n List<String> msgIds = new ArrayList<>();\n JSONArray ja = param.getJSONArray(\"msgIds\");\n for (int i = 0; i < ja.length(); i++) {\n msgIds.add(ja.getString(i));\n }\n String groupId = null;\n if (param.has(\"groupId\")) {\n groupId = param.getString(\"groupId\");\n }\n EMMessage.ChatType type = EMMessage.ChatType.Chat;\n int iType = param.getInt(\"chatType\");\n if (iType == 0) {\n type = EMMessage.ChatType.Chat;\n } else if(iType == 1) {\n type = EMMessage.ChatType.GroupChat;\n } else {\n type = EMMessage.ChatType.ChatRoom;\n }\n EMClient.getInstance().chatManager().asyncGetReactionList(msgIds, type, groupId, new EMValueWrapperCallBack<Map<String, List<EMMessageReaction>>>(result, channelName){\n @Override\n public void onSuccess(Map<String, List<EMMessageReaction>> object) {\n HashMap<String, List<Map<String, Object>>> map = new HashMap<>();\n if (object != null) {\n for (Map.Entry<String, List<EMMessageReaction>> entry: object.entrySet()) {\n List<EMMessageReaction> list = entry.getValue();\n ArrayList<Map<String, Object>> ary = new ArrayList<>();\n for (int i = 0; i < list.size(); i++) {\n ary.add(EMMessageReactionHelper.toJson(list.get(i)));\n }\n map.put(entry.getKey(), ary);\n }\n }\n updateObject(map);\n }\n });\n }\n\n private void fetchReactionDetail(JSONObject param, String channelName, Result result) throws JSONException {\n String msgId = param.getString(\"msgId\");\n String reaction = param.getString(\"reaction\");\n String cursor = null;\n if (param.has(\"cursor\")) {\n cursor = param.getString(\"cursor\");\n }\n int pageSize = param.getInt(\"pageSize\");\n 
EMClient.getInstance().chatManager().asyncGetReactionDetail(msgId, reaction, cursor, pageSize, new EMValueWrapperCallBack<EMCursorResult<EMMessageReaction>>(result, channelName) {\n @Override\n public void onSuccess(EMCursorResult<EMMessageReaction> object) {\n updateObject(EMCursorResultHelper.toJson(object));\n }\n });\n }\n\n private void reportMessage(JSONObject param, String channelName, Result result) throws JSONException {\n String msgId = param.getString(\"msgId\");\n String tag = param.getString(\"tag\");\n String reason = param.getString(\"reason\");\n EMClient.getInstance().chatManager().asyncReportMessage(msgId, tag, reason, new EMWrapperCallBack(result, channelName, true));\n }\n\n private void getConversationsFromServerWithCursor(JSONObject param, String channelName, Result result) throws JSONException {\n String cursor = param.optString(\"cursor\");\n int pageSize = param.optInt(\"pageSize\");\n EMClient.getInstance().chatManager().asyncFetchConversationsFromServer(pageSize, cursor, new EMValueWrapperCallBack<EMCursorResult<EMConversation>>(result, channelName){\n @Override\n public void onSuccess(EMCursorResult<EMConversation> object) {\n super.updateObject(EMCursorResultHelper.toJson(object));\n }\n });\n }\n private void getPinnedConversationsFromServerWithCursor(JSONObject param, String channelName, Result result) throws JSONException {\n String cursor = param.optString(\"cursor\");\n int pageSize = param.optInt(\"pageSize\");\n EMClient.getInstance().chatManager().asyncFetchPinnedConversationsFromServer(pageSize, cursor, new EMValueWrapperCallBack<EMCursorResult<EMConversation>>(result, channelName){\n @Override\n public void onSuccess(EMCursorResult<EMConversation> object) {\n super.updateObject(EMCursorResultHelper.toJson(object));\n }\n });\n }\n private void pinConversation(JSONObject param, String channelName, Result result) throws JSONException {\n String convId = param.optString(\"convId\");\n Boolean isPinned = param.optBoolean(\"isPinned\", false);\n EMClient.getInstance().chatManager().asyncPinConversation(convId, isPinned, new EMWrapperCallBack(result, channelName, null));\n }\n\n private void modifyMessage(JSONObject param, String channelName, Result result) throws JSONException {\n String msgId = param.optString(\"msgId\");\n EMTextMessageBody body = EMMessageBodyHelper.textBodyFromJson(param.optJSONObject(\"body\"));\n EMClient.getInstance().chatManager().asyncModifyMessage(msgId, body, new EMValueWrapperCallBack<EMMessage>(result, channelName) {\n @Override\n public void onSuccess(EMMessage object) {\n updateObject(EMMessageHelper.toJson(object));\n }\n });\n }\n private void downloadAndParseCombineMessage(JSONObject param, String channelName, Result result) throws JSONException {\n EMMessage msg = EMMessageHelper.fromJson(param.optJSONObject(\"message\"));\n EMClient.getInstance().chatManager().downloadAndParseCombineMessage(msg, new EMValueWrapperCallBack<List<EMMessage>>(result, channelName){\n @Override\n public void onSuccess(List<EMMessage> msgList) {\n List<Map> messages = new ArrayList<>();\n for(EMMessage msg: msgList) {\n messages.add(EMMessageHelper.toJson(msg));\n }\n updateObject(messages);\n }\n });\n }\n\n @Override\n public void unRegisterEaseListener() {\n EMClient.getInstance().chatManager().removeMessageListener(messageListener);\n EMClient.getInstance().chatManager().removeConversationListener(conversationListener);\n }\n\n private void registerEaseListener() {\n\n if (messageListener != null) {\n 
EMClient.getInstance().chatManager().removeMessageListener(messageListener);\n }\n\n messageListener = new EMMessageListener() {\n @Override\n public void onMessageReceived(List<EMMessage> messages) {\n ArrayList<Map<String, Object>> msgList = new ArrayList<>();\n for (EMMessage message : messages) {\n msgList.add(EMMessageHelper.toJson(message));\n }\n post(() -> channel.invokeMethod(EMSDKMethod.onMessagesReceived, msgList));\n }\n\n @Override\n public void onCmdMessageReceived(List<EMMessage> messages) {\n\n ArrayList<Map<String, Object>> msgList = new ArrayList<>();\n for (EMMessage message : messages) {\n msgList.add(EMMessageHelper.toJson(message));\n }\n post(() -> channel.invokeMethod(EMSDKMethod.onCmdMessagesReceived, msgList));\n }\n\n @Override\n public void onMessageRead(List<EMMessage> messages) {\n ArrayList<Map<String, Object>> msgList = new ArrayList<>();\n for (EMMessage message : messages) {\n msgList.add(EMMessageHelper.toJson(message));\n post(() -> messageChannel.invokeMethod(EMSDKMethod.onMessageReadAck,\n EMMessageHelper.toJson(message)));\n }\n\n post(() -> channel.invokeMethod(EMSDKMethod.onMessagesRead, msgList));\n }\n\n @Override\n public void onMessageDelivered(List<EMMessage> messages) {\n ArrayList<Map<String, Object>> msgList = new ArrayList<>();\n for (EMMessage message : messages) {\n msgList.add(EMMessageHelper.toJson(message));\n post(() -> messageChannel.invokeMethod(EMSDKMethod.onMessageDeliveryAck,\n EMMessageHelper.toJson(message)));\n }\n post(() -> channel.invokeMethod(EMSDKMethod.onMessagesDelivered, msgList));\n }\n\n @Override\n public void onMessageRecalled(List<EMMessage> messages) {\n ArrayList<Map<String, Object>> msgList = new ArrayList<>();\n for (EMMessage message : messages) {\n msgList.add(EMMessageHelper.toJson(message));\n }\n post(() -> channel.invokeMethod(EMSDKMethod.onMessagesRecalled, msgList));\n }\n\n @Override\n public void onGroupMessageRead(List<EMGroupReadAck> var1) {\n ArrayList<Map<String, Object>> msgList = new ArrayList<>();\n for (EMGroupReadAck ack : var1) {\n msgList.add(EMGroupAckHelper.toJson(ack));\n }\n post(() -> channel.invokeMethod(EMSDKMethod.onGroupMessageRead, msgList));\n }\n\n @Override\n public void onReadAckForGroupMessageUpdated() {\n post(() -> channel.invokeMethod(EMSDKMethod.onReadAckForGroupMessageUpdated, null));\n }\n\n @Override\n public void onReactionChanged(List<EMMessageReactionChange> messageReactionChangeList) {\n ArrayList<Map<String, Object>> list = new ArrayList<>();\n for (EMMessageReactionChange change : messageReactionChangeList) {\n list.add(EMMessageReactionChangeHelper.toJson(change));\n }\n post(() -> channel.invokeMethod(EMSDKMethod.onMessageReactionDidChange, list));\n }\n\n @Override\n public void onMessageContentChanged(EMMessage messageModified, String operatorId, long operationTime) {\n Map msgMap = EMMessageHelper.toJson(messageModified);\n Map map = new HashMap<>();\n map.put(\"message\", msgMap);\n map.put(\"operator\", operatorId);\n map.put(\"operationTime\", operationTime);\n post(() -> channel.invokeMethod(EMSDKMethod.onMessageContentChanged, map));\n }\n };\n\n if (conversationListener != null) {\n EMClient.getInstance().chatManager().removeConversationListener(conversationListener);\n }\n conversationListener = new EMConversationListener() {\n\n @Override\n public void onConversationUpdate() {\n Map<String, Object> data = new HashMap<>();\n post(() -> channel.invokeMethod(EMSDKMethod.onConversationUpdate, data));\n }\n\n @Override\n public void 
onConversationRead(String from, String to) {\n                Map<String, Object> data = new HashMap<>();\n                data.put(\"from\", from);\n                data.put(\"to\", to);\n                post(() -> channel.invokeMethod(EMSDKMethod.onConversationHasRead, data));\n            }\n        };\n\n        EMClient.getInstance().chatManager().addMessageListener(messageListener);\n        EMClient.getInstance().chatManager().addConversationListener(conversationListener);\n    }\n\n    private EMConversation.EMSearchDirection searchDirectionFromString(String direction) {\n        return direction.equals(\"up\") ? EMConversation.EMSearchDirection.UP : EMConversation.EMSearchDirection.DOWN;\n    }\n\n    private EMConversation.EMConversationType typeFromInt(int intType) {\n        if (intType == 0){\n            return EMConversationType.Chat;\n        }else if(intType == 1){\n            return EMConversationType.GroupChat;\n        }else {\n            return EMConversationType.ChatRoom;\n        }\n    }\n}\n" }, { "alpha_fraction": 0.7063086032867432, "alphanum_fraction": 0.7063086032867432, "avg_line_length": 26.279069900512695, "blob_id": "72e804028ee013712aa109e76e188bdaa355d3e1", "content_id": "85ab4d732fd90c154e6c3560c2860f147af01cad", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2486, "license_type": "permissive", "max_line_length": 129, "num_lines": 86, "path": "/FlutterQA.md", "repo_name": "easemob/im_flutter_sdk", "src_encoding": "UTF-8", "text": "How do I bind the deviceToken on iOS?\n\nObjective-C\n\n```objectivec\n#import \"AppDelegate.h\"\n// Import the Easemob SDK\n#import <Hyphenate/Hyphenate.h>\n\n@interface AppDelegate ()\n\n@end\n\n@implementation AppDelegate\n\n\n- (BOOL)application:(UIApplication *)application didFinishLaunchingWithOptions:(NSDictionary *)launchOptions {\n\n\n    // ...\n\n\n    // Get UNUserNotificationCenter and request the [badge, alert, sound] permissions.\n    UNUserNotificationCenter *center = [UNUserNotificationCenter currentNotificationCenter];\n    [center requestAuthorizationWithOptions:\n     UNAuthorizationOptionAlert | UNAuthorizationOptionBadge | UNAuthorizationOptionSound\n                          completionHandler:^(BOOL granted, NSError * _Nullable error) {\n        if (granted) {\n            dispatch_async(dispatch_get_main_queue(), ^{\n                // Register for remote push notifications\n                [application registerForRemoteNotifications];\n            });\n        }\n    }];\n\n\n    return YES;\n}\n\n// System callback invoked when the deviceToken is obtained successfully\n-(void)application:(UIApplication *)application didRegisterForRemoteNotificationsWithDeviceToken:(NSData *)deviceToken{\n    // Pass the deviceToken to the Easemob SDK\n    [EMClient.sharedClient registerForRemoteNotificationsWithDeviceToken:deviceToken completion:nil];\n}\n\n\n\n@end\n```\n\nSwift\n\n```swift\nimport UIKit\nimport Flutter\n// Import the Easemob SDK\nimport HyphenateChat\n\n@UIApplicationMain\n@objc class AppDelegate: FlutterAppDelegate {\n  override func application(\n    _ application: UIApplication,\n    didFinishLaunchingWithOptions launchOptions: [UIApplication.LaunchOptionsKey: Any]?\n  ) -> Bool {\n    GeneratedPluginRegistrant.register(with: self);\n\n    // Get UNUserNotificationCenter and request the [badge, alert, sound] permissions.\n    let center = UNUserNotificationCenter.current();\n    center.requestAuthorization(options: [.badge, .alert, .sound]) { granted, error in\n      if(granted){\n        DispatchQueue.main.async {\n          // Register for remote push notifications\n          application.registerForRemoteNotifications();\n        }\n      }\n    }\n\n    return super.application(application, didFinishLaunchingWithOptions: launchOptions)\n  }\n  // System callback invoked when the deviceToken is obtained successfully\n  override func application(_ application: UIApplication, didRegisterForRemoteNotificationsWithDeviceToken deviceToken: Data) {\n    // Pass the deviceToken to the Easemob SDK\n    EMClient.share.registerForRemoteNotifications(withDeviceToken: deviceToken, completion: nil)\n    
}\n}\n```\n" }, { "alpha_fraction": 0.5700902938842773, "alphanum_fraction": 0.5706028938293457, "avg_line_length": 43.409732818603516, "blob_id": "3f01b43fa0dcb3bf418c991245ac5f8c71c00add", "content_id": "9137d2b41e06ed2002e42a04733d0f57737dc72d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 56577, "license_type": "permissive", "max_line_length": 178, "num_lines": 1274, "path": "/android/src/main/java/com/easemob/im_flutter_sdk/EMGroupManagerWrapper.java", "repo_name": "easemob/im_flutter_sdk", "src_encoding": "UTF-8", "text": "package com.easemob.im_flutter_sdk;\n\nimport com.hyphenate.EMGroupChangeListener;\nimport com.hyphenate.chat.EMClient;\n\nimport io.flutter.embedding.engine.plugins.FlutterPlugin;\n\nimport io.flutter.plugin.common.MethodChannel.Result;\n\nimport com.hyphenate.chat.EMCursorResult;\nimport com.hyphenate.chat.EMGroup;\nimport com.hyphenate.chat.EMGroupInfo;\nimport com.hyphenate.chat.EMGroupOptions;\nimport com.hyphenate.chat.EMMucSharedFile;\nimport com.hyphenate.exceptions.HyphenateException;\n\nimport org.json.JSONArray;\nimport org.json.JSONException;\nimport org.json.JSONObject;\n\nimport java.util.ArrayList;\nimport java.util.HashMap;\nimport java.util.Iterator;\nimport java.util.List;\nimport java.util.Map;\n\nimport io.flutter.plugin.common.MethodCall;\nimport io.flutter.plugin.common.MethodChannel;\nimport io.flutter.plugin.common.MethodChannel.MethodCallHandler;\n\npublic class EMGroupManagerWrapper extends EMWrapper implements MethodCallHandler {\n\n private EMGroupChangeListener groupChangeListener;\n\n public EMClientWrapper clientWrapper;\n\n EMGroupManagerWrapper(FlutterPlugin.FlutterPluginBinding flutterPluginBinding, String channelName) {\n super(flutterPluginBinding, channelName);\n registerEaseListener();\n }\n\n @Override\n public void onMethodCall(MethodCall call, MethodChannel.Result result) {\n\n JSONObject param = (JSONObject) call.arguments;\n try {\n if (EMSDKMethod.getGroupWithId.equals(call.method)) {\n getGroupWithId(param, call.method, result);\n } else if (EMSDKMethod.getJoinedGroups.equals(call.method)) {\n getJoinedGroups(param, call.method, result);\n } else if (EMSDKMethod.getJoinedGroupsFromServer.equals(call.method)) {\n getJoinedGroupsFromServer(param, call.method, result);\n } else if (EMSDKMethod.getPublicGroupsFromServer.equals(call.method)) {\n getPublicGroupsFromServer(param, call.method, result);\n } else if (EMSDKMethod.createGroup.equals(call.method)) {\n createGroup(param, call.method, result);\n } else if (EMSDKMethod.getGroupSpecificationFromServer.equals(call.method)) {\n getGroupSpecificationFromServer(param, call.method, result);\n } else if (EMSDKMethod.getGroupMemberListFromServer.equals(call.method)) {\n getGroupMemberListFromServer(param, call.method, result);\n } else if (EMSDKMethod.getGroupMuteListFromServer.equals(call.method)) {\n getGroupMuteListFromServer(param, call.method, result);\n } else if (EMSDKMethod.getGroupWhiteListFromServer.equals(call.method)) {\n getGroupWhiteListFromServer(param, call.method, result);\n } else if (EMSDKMethod.isMemberInWhiteListFromServer.equals(call.method)) {\n isMemberInWhiteListFromServer(param, call.method, result);\n } else if (EMSDKMethod.getGroupFileListFromServer.equals(call.method)) {\n getGroupFileListFromServer(param, call.method, result);\n } else if (EMSDKMethod.getGroupAnnouncementFromServer.equals(call.method)) {\n getGroupAnnouncementFromServer(param, call.method, result);\n } else if 
(EMSDKMethod.getGroupBlockListFromServer.equals(call.method)) {\n getGroupBlockListFromServer(param, call.method, result);\n } else if (EMSDKMethod.addMembers.equals(call.method)) {\n addMembers(param, call.method, result);\n } else if (EMSDKMethod.inviterUser.equals(call.method)){\n inviterUser(param, call.method, result);\n } else if (EMSDKMethod.removeMembers.equals(call.method)) {\n removeMembers(param, call.method, result);\n } else if (EMSDKMethod.blockMembers.equals(call.method)) {\n blockMembers(param, call.method, result);\n } else if (EMSDKMethod.unblockMembers.equals(call.method)) {\n unblockMembers(param, call.method, result);\n } else if (EMSDKMethod.updateGroupSubject.equals(call.method)) {\n updateGroupSubject(param, call.method, result);\n } else if (EMSDKMethod.updateDescription.equals(call.method)) {\n updateDescription(param, call.method, result);\n } else if (EMSDKMethod.leaveGroup.equals(call.method)) {\n leaveGroup(param, call.method, result);\n } else if (EMSDKMethod.destroyGroup.equals(call.method)) {\n destroyGroup(param, call.method, result);\n } else if (EMSDKMethod.blockGroup.equals(call.method)) {\n blockGroup(param, call.method, result);\n } else if (EMSDKMethod.unblockGroup.equals(call.method)) {\n unblockGroup(param, call.method, result);\n } else if (EMSDKMethod.updateGroupOwner.equals(call.method)) {\n updateGroupOwner(param, call.method, result);\n } else if (EMSDKMethod.addAdmin.equals(call.method)) {\n addAdmin(param, call.method, result);\n } else if (EMSDKMethod.removeAdmin.equals(call.method)) {\n removeAdmin(param, call.method, result);\n } else if (EMSDKMethod.muteMembers.equals(call.method)) {\n muteMembers(param, call.method, result);\n } else if (EMSDKMethod.unMuteMembers.equals(call.method)) {\n unMuteMembers(param, call.method, result);\n } else if (EMSDKMethod.muteAllMembers.equals(call.method)) {\n muteAllMembers(param, call.method, result);\n } else if (EMSDKMethod.unMuteAllMembers.equals(call.method)) {\n unMuteAllMembers(param, call.method, result);\n } else if (EMSDKMethod.addWhiteList.equals(call.method)) {\n addWhiteList(param, call.method, result);\n } else if (EMSDKMethod.removeWhiteList.equals(call.method)) {\n removeWhiteList(param, call.method, result);\n } else if (EMSDKMethod.uploadGroupSharedFile.equals(call.method)) {\n uploadGroupSharedFile(param, call.method, result);\n } else if (EMSDKMethod.downloadGroupSharedFile.equals(call.method)) {\n downloadGroupSharedFile(param, call.method, result);\n } else if (EMSDKMethod.removeGroupSharedFile.equals(call.method)) {\n removeGroupSharedFile(param, call.method, result);\n } else if (EMSDKMethod.updateGroupAnnouncement.equals(call.method)) {\n updateGroupAnnouncement(param, call.method, result);\n } else if (EMSDKMethod.updateGroupExt.equals(call.method)) {\n updateGroupExt(param, call.method, result);\n } else if (EMSDKMethod.joinPublicGroup.equals(call.method)) {\n joinPublicGroup(param, call.method, result);\n } else if (EMSDKMethod.requestToJoinPublicGroup.equals(call.method)) {\n requestToJoinPublicGroup(param, call.method, result);\n } else if (EMSDKMethod.acceptJoinApplication.equals(call.method)) {\n acceptJoinApplication(param, call.method, result);\n } else if (EMSDKMethod.declineJoinApplication.equals(call.method)) {\n declineJoinApplication(param, call.method, result);\n } else if (EMSDKMethod.acceptInvitationFromGroup.equals(call.method)) {\n acceptInvitationFromGroup(param, call.method, result);\n } else if (EMSDKMethod.declineInvitationFromGroup.equals(call.method)) 
{\n declineInvitationFromGroup(param, call.method, result);\n } else if (EMSDKMethod.setMemberAttributesFromGroup.equals(call.method)) {\n setMemberAttributes(param, call.method, result);\n } else if (EMSDKMethod.removeMemberAttributesFromGroup.equals(call.method)) {\n removeMemberAttributes(param, call.method, result);\n } else if (EMSDKMethod.fetchMemberAttributesFromGroup.equals(call.method)) {\n fetchMemberAttributes(param, call.method, result);\n } else if (EMSDKMethod.fetchMembersAttributesFromGroup.equals(call.method)) {\n fetchMembersAttributes(param, call.method, result);\n } else {\n super.onMethodCall(call, result);\n }\n } catch (JSONException e) {\n e.printStackTrace();\n }\n }\n\n private void getGroupWithId(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n asyncRunnable(() -> {\n EMGroup group = EMClient.getInstance().groupManager().getGroup(groupId);\n if (group != null) {\n onSuccess(result, channelName, EMGroupHelper.toJson(group));\n } else {\n onSuccess(result, channelName, null);\n }\n });\n }\n\n private void getJoinedGroups(JSONObject param, String channelName, Result result) throws JSONException {\n asyncRunnable(() -> {\n List<EMGroup> groups = EMClient.getInstance().groupManager().getAllGroups();\n List<Map> groupList = new ArrayList<>();\n for (EMGroup group : groups) {\n groupList.add(EMGroupHelper.toJson(group));\n }\n onSuccess(result, channelName, groupList);\n });\n }\n\n private void getJoinedGroupsFromServer(JSONObject param, String channelName, Result result) throws JSONException {\n\n int pageSize = 0;\n if (param.has(\"pageSize\")){\n pageSize = param.getInt(\"pageSize\");\n }\n int pageNum = 0;\n if (param.has(\"pageNum\")){\n pageNum = param.getInt(\"pageNum\");\n }\n\n boolean needMemberCount = false;\n if (param.has(\"needMemberCount\")) {\n needMemberCount = param.getBoolean(\"needMemberCount\");\n }\n\n boolean needRole = false;\n if (param.has(\"needRole\")) {\n needRole = param.getBoolean(\"needRole\");\n }\n\n EMValueWrapperCallBack<List<EMGroup>> callBack = new EMValueWrapperCallBack<List<EMGroup>>(result,\n channelName) {\n @Override\n public void onSuccess(List<EMGroup> object) {\n List<Map> groupList = new ArrayList<>();\n for (EMGroup group : object) {\n groupList.add(EMGroupHelper.toJson(group));\n }\n updateObject(groupList);\n }\n };\n\n EMClient.getInstance().groupManager().asyncGetJoinedGroupsFromServer(pageNum, pageSize, needMemberCount, needRole,callBack);\n }\n\n private void getPublicGroupsFromServer(JSONObject param, String channelName, Result result) throws JSONException {\n int pageSize = 0;\n if (param.has(\"pageSize\")){\n pageSize = param.getInt(\"pageSize\");\n }\n String cursor = null;\n if (param.has(\"cursor\")){\n cursor = param.getString(\"cursor\");\n }\n EMValueWrapperCallBack<EMCursorResult<EMGroupInfo>> callBack = new EMValueWrapperCallBack<EMCursorResult<EMGroupInfo>>(\n result, channelName) {\n @Override\n public void onSuccess(EMCursorResult<EMGroupInfo> object) {\n updateObject(EMCursorResultHelper.toJson(object));\n }\n };\n\n EMClient.getInstance().groupManager().asyncGetPublicGroupsFromServer(pageSize, cursor, callBack);\n }\n\n private void createGroup(JSONObject param, String channelName, Result result) throws JSONException {\n String groupName = null;\n\n if (param.has(\"groupName\")){\n groupName = param.getString(\"groupName\");\n }\n\n String desc = null;\n if(param.has(\"desc\")){\n desc = param.getString(\"desc\");\n }\n\n 
String[] members = null;\n if(param.has(\"inviteMembers\")){\n JSONArray inviteMembers = param.getJSONArray(\"inviteMembers\");\n members = new String[inviteMembers.length()];\n for (int i = 0; i < inviteMembers.length(); i++) {\n members[i] = inviteMembers.getString(i);\n }\n }\n if (members == null) {\n members = new String[0];\n }\n String inviteReason = null;\n\n if (param.has(\"inviteReason\")){\n inviteReason = param.getString(\"inviteReason\");\n }\n\n EMGroupOptions options = EMGroupOptionsHelper.fromJson(param.getJSONObject(\"options\"));\n\n EMValueWrapperCallBack<EMGroup> callBack = new EMValueWrapperCallBack<EMGroup>(result, channelName) {\n @Override\n public void onSuccess(EMGroup object) {\n updateObject(EMGroupHelper.toJson(object));\n }\n };\n\n EMClient.getInstance().groupManager().asyncCreateGroup(groupName, desc, members, inviteReason, options,\n callBack);\n }\n\n private void getGroupSpecificationFromServer(JSONObject param, String channelName, Result result)\n throws JSONException {\n String groupId = param.getString(\"groupId\");\n boolean fetchMembers = param.getBoolean(\"fetchMembers\");\n asyncRunnable(() -> {\n try {\n EMGroup group = EMClient.getInstance().groupManager().getGroupFromServer(groupId, fetchMembers);\n onSuccess(result, channelName, EMGroupHelper.toJson(group));\n } catch (HyphenateException e) {\n onError(result, e);\n }\n });\n }\n\n private void getGroupMemberListFromServer(JSONObject param, String channelName, Result result)\n throws JSONException {\n String groupId = param.getString(\"groupId\");\n String cursor = null;\n if(param.has(\"cursor\")){\n cursor = param.getString(\"cursor\");\n }\n int pageSize = param.getInt(\"pageSize\");\n\n EMValueWrapperCallBack<EMCursorResult<String>> callBack = new EMValueWrapperCallBack<EMCursorResult<String>>(\n result, channelName) {\n @Override\n public void onSuccess(EMCursorResult<String> object) {\n updateObject(EMCursorResultHelper.toJson(object));\n }\n };\n\n EMClient.getInstance().groupManager().asyncFetchGroupMembers(groupId, cursor, pageSize, callBack);\n\n }\n\n private void getGroupBlockListFromServer(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n int pageSize = 0;\n if (param.has(\"pageSize\")){\n pageSize = param.getInt(\"pageSize\");\n }\n int pageNum = 0;\n if (param.has(\"pageNum\")){\n pageNum = param.getInt(\"pageNum\");\n }\n\n EMClient.getInstance().groupManager().asyncGetBlockedUsers(groupId, pageNum, pageSize,\n new EMValueWrapperCallBack<List<String>>(result, channelName));\n }\n\n private void getGroupMuteListFromServer(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n int pageSize = 0;\n if (param.has(\"pageSize\")){\n pageSize = param.getInt(\"pageSize\");\n }\n int pageNum = 0;\n if (param.has(\"pageNum\")){\n pageNum = param.getInt(\"pageNum\");\n }\n\n\n EMValueWrapperCallBack<Map<String, Long>> callBack = new EMValueWrapperCallBack<Map<String, Long>>(result,\n channelName) {\n @Override\n public void onSuccess(Map<String, Long> object) {\n updateObject(object);\n }\n };\n\n EMClient.getInstance().groupManager().asyncFetchGroupMuteList(groupId, pageNum, pageSize, callBack);\n }\n\n private void getGroupWhiteListFromServer(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n EMClient.getInstance().groupManager().fetchGroupWhiteList(groupId,\n new 
EMValueWrapperCallBack<List<String>>(result, channelName));\n }\n\n private void isMemberInWhiteListFromServer(JSONObject param, String channelName, Result result)\n throws JSONException {\n String groupId = param.getString(\"groupId\");\n EMClient.getInstance().groupManager().checkIfInGroupWhiteList(groupId,\n new EMValueWrapperCallBack<Boolean>(result, channelName));\n }\n\n private void getGroupFileListFromServer(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n int pageNum = 0;\n if (param.has(\"pageNum\")){\n pageNum = param.getInt(\"pageNum\");\n }\n int pageSize = 0;\n if (param.has(\"pageSize\")) {\n pageSize = param.getInt(\"pageSize\");\n }\n\n EMValueWrapperCallBack<List<EMMucSharedFile>> callBack = new EMValueWrapperCallBack<List<EMMucSharedFile>>(\n result, channelName) {\n @Override\n public void onSuccess(List<EMMucSharedFile> object) {\n List<Map> fileList = new ArrayList<>();\n for (EMMucSharedFile file : object) {\n fileList.add(EMMucSharedFileHelper.toJson(file));\n }\n updateObject(fileList);\n }\n };\n\n EMClient.getInstance().groupManager().asyncFetchGroupSharedFileList(groupId, pageNum, pageSize, callBack);\n }\n\n private void getGroupAnnouncementFromServer(JSONObject param, String channelName, Result result)\n throws JSONException {\n String groupId = param.getString(\"groupId\");\n EMClient.getInstance().groupManager().asyncFetchGroupAnnouncement(groupId,\n new EMValueWrapperCallBack<String>(result, channelName));\n }\n\n private void inviterUser(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n String reason = null;\n if (param.has(\"reason\")) {\n reason = param.getString(\"reason\");\n }\n String[] members = null;\n if (param.has(\"members\")){\n JSONArray array = param.getJSONArray(\"members\");\n members = new String[array.length()];\n for (int i = 0; i < array.length(); i++) {\n members[i] = array.getString(i);\n }\n }\n if (members == null) {\n members = new String[0];\n }\n EMClient.getInstance().groupManager().asyncInviteUser(groupId, members, reason,\n new EMWrapperCallBack(result, channelName, true));\n }\n\n private void addMembers(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n\n String[] members = null;\n if (param.has(\"members\")){\n JSONArray array = param.getJSONArray(\"members\");\n members = new String[array.length()];\n for (int i = 0; i < array.length(); i++) {\n members[i] = array.getString(i);\n }\n }\n if (members == null) {\n members = new String[0];\n }\n\n String welcome = null;\n if (param.has(\"welcome\")){\n welcome = param.getString(\"welcome\");\n }\n String finalWelcome = welcome;\n String[] finalMembers = members;\n asyncRunnable(() -> {\n try {\n EMClient.getInstance().groupManager().addUsersToGroup(groupId, finalMembers, finalWelcome);\n onSuccess(result, channelName, true);\n } catch (HyphenateException e) {\n onError(result, e);\n }\n });\n }\n\n private void removeMembers(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n List<String> members = new ArrayList<>();\n if (param.has(\"members\")){\n JSONArray array = param.getJSONArray(\"members\");\n for (int i = 0; i < array.length(); i++) {\n members.add(array.getString(i));\n }\n }\n\n EMClient.getInstance().groupManager().asyncRemoveUsersFromGroup(groupId, members,\n new 
EMWrapperCallBack(result, channelName, true));\n }\n\n private void blockMembers(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n List<String> members = new ArrayList<>();\n if (param.has(\"members\")){\n JSONArray array = param.getJSONArray(\"members\");\n for (int i = 0; i < array.length(); i++) {\n members.add(array.getString(i));\n }\n }\n\n EMClient.getInstance().groupManager().asyncBlockUsers(groupId, members,\n new EMWrapperCallBack(result, channelName, true));\n }\n\n private void unblockMembers(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n List<String> members = new ArrayList<>();\n if (param.has(\"members\")){\n JSONArray array = param.getJSONArray(\"members\");\n for (int i = 0; i < array.length(); i++) {\n members.add(array.getString(i));\n }\n }\n\n EMClient.getInstance().groupManager().asyncUnblockUsers(groupId, members,\n new EMWrapperCallBack(result, channelName, true));\n }\n\n private void updateGroupSubject(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n\n String name = \"\";\n if (param.has(\"name\")){\n name = param.getString(\"name\");\n }\n\n EMClient.getInstance().groupManager().asyncChangeGroupName(groupId, name, new EMWrapperCallBack(result, channelName, null));\n\n }\n\n private void updateDescription(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n String desc = \"\";\n if (param.has(\"desc\")){\n desc = param.getString(\"desc\");\n }\n\n EMClient.getInstance().groupManager().asyncChangeGroupDescription(groupId, desc, new EMWrapperCallBack(result, channelName, null) );\n }\n\n private void leaveGroup(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n EMClient.getInstance().groupManager().asyncLeaveGroup(groupId,\n new EMWrapperCallBack(result, channelName, true));\n }\n\n private void destroyGroup(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n EMClient.getInstance().groupManager().asyncDestroyGroup(groupId,\n new EMWrapperCallBack(result, channelName, true));\n }\n\n private void blockGroup(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n EMClient.getInstance().groupManager().asyncBlockGroupMessage(groupId, new EMWrapperCallBack(result, channelName, null) );\n }\n\n private void unblockGroup(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n\n EMClient.getInstance().groupManager().asyncUnblockGroupMessage(groupId, new EMWrapperCallBack(result, channelName, null));\n }\n\n private void updateGroupOwner(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n String newOwner = param.getString(\"owner\");\n\n EMValueWrapperCallBack<EMGroup> callBack = new EMValueWrapperCallBack<EMGroup>(result, channelName) {\n @Override\n public void onSuccess(EMGroup object) {\n super.updateObject(EMGroupHelper.toJson(object));\n }\n };\n\n EMClient.getInstance().groupManager().asyncChangeOwner(groupId, newOwner, callBack);\n }\n\n private void addAdmin(JSONObject param, String channelName, Result 
result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n String admin = param.getString(\"admin\");\n\n EMValueWrapperCallBack<EMGroup> callBack = new EMValueWrapperCallBack<EMGroup>(result, channelName) {\n @Override\n public void onSuccess(EMGroup object) {\n super.updateObject(EMGroupHelper.toJson(object));\n }\n };\n\n EMClient.getInstance().groupManager().asyncAddGroupAdmin(groupId, admin, callBack);\n }\n\n private void removeAdmin(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n String admin = param.getString(\"admin\");\n\n EMValueWrapperCallBack<EMGroup> callBack = new EMValueWrapperCallBack<EMGroup>(result, channelName) {\n @Override\n public void onSuccess(EMGroup object) {\n super.updateObject(EMGroupHelper.toJson(object));\n }\n };\n\n EMClient.getInstance().groupManager().asyncRemoveGroupAdmin(groupId, admin, callBack);\n }\n\n private void muteMembers(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n\n int duration = 0;\n if (param.has(\"duration\")){\n duration = param.getInt(\"duration\");\n }\n List<String> members = new ArrayList<>();\n if (param.has(\"members\")){\n JSONArray array = param.getJSONArray(\"members\");\n for (int i = 0; i < array.length(); i++) {\n members.add(array.getString(i));\n }\n }\n EMValueWrapperCallBack<EMGroup> callBack = new EMValueWrapperCallBack<EMGroup>(result, channelName) {\n @Override\n public void onSuccess(EMGroup object) {\n super.updateObject(EMGroupHelper.toJson(object));\n }\n };\n\n EMClient.getInstance().groupManager().asyncMuteGroupMembers(groupId, members, duration, callBack);\n }\n\n private void unMuteMembers(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n List<String> members = new ArrayList<>();\n if (param.has(\"members\")){\n JSONArray array = param.getJSONArray(\"members\");\n for (int i = 0; i < array.length(); i++) {\n members.add(array.getString(i));\n }\n }\n EMValueWrapperCallBack<EMGroup> callBack = new EMValueWrapperCallBack<EMGroup>(result, channelName) {\n @Override\n public void onSuccess(EMGroup object) {\n super.updateObject(EMGroupHelper.toJson(object));\n }\n };\n\n EMClient.getInstance().groupManager().asyncUnMuteGroupMembers(groupId, members, callBack);\n }\n\n private void muteAllMembers(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n\n EMValueWrapperCallBack<EMGroup> callBack = new EMValueWrapperCallBack<EMGroup>(result, channelName) {\n @Override\n public void onSuccess(EMGroup object) {\n super.updateObject(EMGroupHelper.toJson(object));\n }\n };\n\n EMClient.getInstance().groupManager().muteAllMembers(groupId, callBack);\n }\n\n private void unMuteAllMembers(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n\n EMValueWrapperCallBack<EMGroup> callBack = new EMValueWrapperCallBack<EMGroup>(result, channelName) {\n @Override\n public void onSuccess(EMGroup object) {\n super.updateObject(EMGroupHelper.toJson(object));\n }\n };\n\n EMClient.getInstance().groupManager().unmuteAllMembers(groupId, callBack);\n }\n\n private void addWhiteList(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n List<String> members = new 
ArrayList<>();\n if (param.has(\"members\")){\n JSONArray array = param.getJSONArray(\"members\");\n for (int i = 0; i < array.length(); i++) {\n members.add(array.getString(i));\n }\n }\n EMClient.getInstance().groupManager().addToGroupWhiteList(groupId, members,\n new EMWrapperCallBack(result, channelName, true));\n }\n\n private void removeWhiteList(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n List<String> members = new ArrayList<>();\n if (param.has(\"members\")){\n JSONArray array = param.getJSONArray(\"members\");\n for (int i = 0; i < array.length(); i++) {\n members.add(array.getString(i));\n }\n }\n EMClient.getInstance().groupManager().removeFromGroupWhiteList(groupId, members,\n new EMWrapperCallBack(result, channelName, true));\n }\n\n private void uploadGroupSharedFile(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n String filePath = null;\n if (param.has(\"filePath\")){\n filePath = param.getString(\"filePath\");\n }\n\n EMClient.getInstance().groupManager().asyncUploadGroupSharedFile(groupId, filePath,\n new EMWrapperCallBack(result, channelName, true));\n }\n\n private void downloadGroupSharedFile(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n String fileId = null;\n if (param.has(\"fileId\")) {\n fileId = param.getString(\"fileId\");\n }\n String savePath = null;\n if (param.has(\"savePath\")) {\n savePath = param.getString(\"savePath\");\n }\n\n EMClient.getInstance().groupManager().asyncDownloadGroupSharedFile(groupId, fileId, savePath,\n new EMDownloadCallback(fileId, savePath){\n @Override\n public void onSuccess() {\n clientWrapper.progressManager.sendDownloadSuccessToFlutter(fileId, savePath);\n }\n\n @Override\n public void onProgress(int progress, String status) {\n clientWrapper.progressManager.sendDownloadProgressToFlutter(fileId, progress);\n }\n\n @Override\n public void onError(int code, String error) {\n HyphenateException e = new HyphenateException(code, error);\n clientWrapper.progressManager.sendDownloadErrorToFlutter(fileId, e);\n }\n });\n\n post(()->{\n onSuccess(result, channelName, true);\n });\n }\n\n private void removeGroupSharedFile(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n String fileId = null;\n if (param.has(\"fileId\")) {\n fileId = param.getString(\"fileId\");\n }\n EMClient.getInstance().groupManager().asyncDeleteGroupSharedFile(groupId, fileId,\n new EMWrapperCallBack(result, channelName, true));\n }\n\n private void updateGroupAnnouncement(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n String announcement = null;\n if (param.has(\"announcement\")) {\n announcement = param.getString(\"announcement\");\n }\n\n EMClient.getInstance().groupManager().asyncUpdateGroupAnnouncement(groupId, announcement, new EMWrapperCallBack(result, channelName, null) );\n }\n\n private void updateGroupExt(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n String ext = null;\n if (param.has(\"ext\")) {\n ext = param.getString(\"ext\");\n }\n\n String finalExt = ext;\n asyncRunnable(() -> {\n try {\n EMGroup group = EMClient.getInstance().groupManager().updateGroupExtension(groupId, 
finalExt);\n onSuccess(result, channelName, EMGroupHelper.toJson(group));\n } catch (HyphenateException e) {\n onError(result, e);\n }\n });\n }\n\n private void joinPublicGroup(JSONObject param, String channelName, Result result) throws JSONException {\n\n String groupId = param.getString(\"groupId\");\n asyncRunnable(()->{\n try{\n EMGroup group = EMClient.getInstance().groupManager().getGroupFromServer(groupId);\n if (group.isMemberOnly()){\n throw new HyphenateException(603,\"User has no permission for this operation\");\n }\n EMClient.getInstance().groupManager().joinGroup(groupId);\n\n onSuccess(result, channelName, null);\n }catch (HyphenateException e){\n onError(result, e);\n }\n });\n }\n\n private void requestToJoinPublicGroup(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n String reason = null;\n if (param.has(\"reason\")){\n reason = param.getString(\"reason\");\n }\n\n EMClient.getInstance().groupManager().asyncApplyJoinToGroup(groupId, reason, new EMWrapperCallBack(result, channelName, null));\n }\n\n private void acceptJoinApplication(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n\n String username = null;\n if (param.has(\"username\")){\n username = param.getString(\"username\");\n }\n\n EMClient.getInstance().groupManager().asyncAcceptApplication(username, groupId, new EMWrapperCallBack(result, channelName, null));\n }\n\n private void declineJoinApplication(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n String username = null;\n if (param.has(\"username\")){\n username = param.getString(\"username\");\n }\n String reason = null;\n if (param.has(\"reason\")){\n reason = param.getString(\"reason\");\n }\n\n EMClient.getInstance().groupManager().asyncDeclineApplication(username, groupId, reason, new EMWrapperCallBack(result, channelName, null));\n }\n\n private void acceptInvitationFromGroup(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n\n String inviter = null;\n if (param.has(\"inviter\")){\n inviter = param.getString(\"inviter\");\n }\n EMValueWrapperCallBack<EMGroup> callBack = new EMValueWrapperCallBack<EMGroup>(result, channelName) {\n @Override\n public void onSuccess(EMGroup object) {\n super.updateObject(EMGroupHelper.toJson(object));\n }\n };\n\n EMClient.getInstance().groupManager().asyncAcceptInvitation(groupId, inviter, callBack);\n }\n\n private void declineInvitationFromGroup(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n String username = null;\n if (param.has(\"username\")){\n username = param.getString(\"username\");\n }\n String reason = null;\n if (param.has(\"reason\")){\n reason = param.getString(\"reason\");\n }\n\n EMClient.getInstance().groupManager().asyncDeclineInvitation(groupId, username, reason, new EMWrapperCallBack(result, channelName, null));\n }\n\n private void setMemberAttributes(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n String userId = param.optString(\"userId\");\n if (userId == \"\") {\n userId = EMClient.getInstance().getCurrentUser();\n }\n Map<String, String> attributes = new HashMap<>();\n\n JSONObject jsonObject = param.getJSONObject(\"attributes\");\n 
Iterator iterator = jsonObject.keys();\n while (iterator.hasNext()) {\n String key = iterator.next().toString();\n attributes.put(key, jsonObject.getString(key));\n }\n\n EMClient.getInstance().groupManager().asyncSetGroupMemberAttributes(groupId, userId, attributes, new EMWrapperCallBack(result, channelName, null));\n }\n\n private void removeMemberAttributes(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n String userId = param.optString(\"userId\");\n if (userId == \"\") {\n userId = EMClient.getInstance().getCurrentUser();\n }\n Map<String, String> attributes = new HashMap<>();\n\n JSONArray ja = param.getJSONArray(\"keys\");\n for (int i = 0; i < ja.length(); i++) {\n attributes.put(ja.optString(i),\"\" );\n }\n\n EMClient.getInstance().groupManager().asyncSetGroupMemberAttributes(groupId, userId, attributes, new EMWrapperCallBack(result, channelName, null));\n }\n\n private void fetchMemberAttributes(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n String userId = param.optString(\"userId\");\n if (userId == \"\") {\n userId = EMClient.getInstance().getCurrentUser();\n }\n String finalUserId = userId;\n EMClient.getInstance().groupManager().asyncFetchGroupMemberAllAttributes(groupId, userId, new EMValueWrapperCallBack<Map<String,Map<String,String>>>(result, channelName){\n\n @Override\n public void onSuccess(Map<String, Map<String, String>> object) {\n updateObject(object.get(finalUserId));\n }\n });\n }\n\n private void fetchMembersAttributes(JSONObject param, String channelName, Result result) throws JSONException {\n String groupId = param.getString(\"groupId\");\n JSONArray jUsers = param.getJSONArray(\"userIds\");\n List<String> userIds = new ArrayList<>();\n for (int i = 0; i < jUsers.length(); i++) {\n userIds.add(jUsers.getString(i));\n }\n List<String> keys = new ArrayList<>();\n if (param.has(\"keys\")) {\n JSONArray jsonArray = param.getJSONArray(\"keys\");\n for (int i = 0; i < jsonArray.length(); i++) {\n keys.add(jsonArray.getString(i));\n }\n }\n EMValueWrapperCallBack callback = new EMValueWrapperCallBack<Map<String, Map<String, String>>>(result, channelName) {\n @Override\n public void onSuccess(Map<String, Map<String, String>> object) {\n updateObject(object);\n }\n };\n\n EMClient.getInstance().groupManager().asyncFetchGroupMembersAttributes(groupId, userIds, keys, callback);\n }\n\n private void registerEaseListener() {\n\n if (groupChangeListener != null) {\n EMClient.getInstance().groupManager().removeGroupChangeListener(groupChangeListener);\n }\n groupChangeListener = new EMGroupChangeListener() {\n\n @Override\n public void onWhiteListAdded(String groupId, List<String> whitelist) {\n EMListenerHandle.getInstance().addHandle(\n ()-> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"type\", \"onGroupWhiteListAdded\");\n data.put(\"groupId\", groupId);\n data.put(\"whitelist\", whitelist);\n post(() -> channel.invokeMethod(EMSDKMethod.onGroupChanged, data));\n }\n );\n }\n\n @Override\n public void onWhiteListRemoved(String groupId, List<String> whitelist) {\n EMListenerHandle.getInstance().addHandle(\n ()-> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"type\", \"onGroupWhiteListRemoved\");\n data.put(\"groupId\", groupId);\n data.put(\"whitelist\", whitelist);\n post(() -> channel.invokeMethod(EMSDKMethod.onGroupChanged, data));\n }\n );\n }\n\n @Override\n public void 
onAllMemberMuteStateChanged(String groupId, boolean isMuted) {\n EMListenerHandle.getInstance().addHandle(\n ()-> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"type\", \"onGroupAllMemberMuteStateChanged\");\n data.put(\"groupId\", groupId);\n data.put(\"isMuted\", isMuted);\n post(() -> channel.invokeMethod(EMSDKMethod.onGroupChanged, data));\n }\n );\n }\n\n @Override\n public void onInvitationReceived(String groupId, String groupName, String inviter, String reason) {\n EMListenerHandle.getInstance().addHandle(\n ()-> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"type\", \"onGroupInvitationReceived\");\n data.put(\"groupId\", groupId);\n data.put(\"groupName\", groupName);\n data.put(\"inviter\", inviter);\n data.put(\"reason\", reason);\n post(() -> channel.invokeMethod(EMSDKMethod.onGroupChanged, data));\n }\n );\n }\n\n @Override\n public void onRequestToJoinReceived(String groupId, String groupName, String applicant, String reason) {\n EMListenerHandle.getInstance().addHandle(\n ()-> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"type\", \"onGroupRequestToJoinReceived\");\n data.put(\"groupId\", groupId);\n data.put(\"groupName\", groupName);\n data.put(\"applicant\", applicant);\n data.put(\"reason\", reason);\n post(() -> channel.invokeMethod(EMSDKMethod.onGroupChanged, data));\n }\n );\n\n }\n\n @Override\n public void onRequestToJoinAccepted(String groupId, String groupName, String accepter) {\n EMListenerHandle.getInstance().addHandle(\n ()-> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"type\", \"onGroupRequestToJoinAccepted\");\n data.put(\"groupId\", groupId);\n data.put(\"groupName\", groupName);\n data.put(\"accepter\", accepter);\n post(() -> channel.invokeMethod(EMSDKMethod.onGroupChanged, data));\n }\n );\n\n }\n\n @Override\n public void onRequestToJoinDeclined(String groupId, String groupName, String decliner, String reason) {\n EMListenerHandle.getInstance().addHandle(\n ()-> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"type\", \"onGroupRequestToJoinDeclined\");\n data.put(\"groupId\", groupId);\n data.put(\"groupName\", groupName);\n data.put(\"decliner\", decliner);\n data.put(\"reason\", reason);\n post(() -> channel.invokeMethod(EMSDKMethod.onGroupChanged, data));\n }\n );\n\n }\n\n @Override\n public void onInvitationAccepted(String groupId, String invitee, String reason) {\n EMListenerHandle.getInstance().addHandle(\n ()-> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"type\", \"onGroupInvitationAccepted\");\n data.put(\"groupId\", groupId);\n data.put(\"invitee\", invitee);\n data.put(\"reason\", reason);\n post(() -> channel.invokeMethod(EMSDKMethod.onGroupChanged, data));\n }\n );\n\n }\n\n @Override\n public void onInvitationDeclined(String groupId, String invitee, String reason) {\n EMListenerHandle.getInstance().addHandle(\n ()-> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"type\", \"onGroupInvitationDeclined\");\n data.put(\"groupId\", groupId);\n data.put(\"invitee\", invitee);\n data.put(\"reason\", reason);\n post(() -> channel.invokeMethod(EMSDKMethod.onGroupChanged, data));\n }\n );\n\n }\n\n @Override\n public void onUserRemoved(String groupId, String groupName) {\n EMListenerHandle.getInstance().addHandle(\n ()-> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"type\", \"onGroupUserRemoved\");\n data.put(\"groupId\", groupId);\n data.put(\"groupName\", groupName);\n post(() -> channel.invokeMethod(EMSDKMethod.onGroupChanged, data));\n 
}\n );\n\n }\n\n @Override\n public void onGroupDestroyed(String groupId, String groupName) {\n EMListenerHandle.getInstance().addHandle(\n ()-> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"type\", \"onGroupDestroyed\");\n data.put(\"groupId\", groupId);\n data.put(\"groupName\", groupName);\n post(() -> channel.invokeMethod(EMSDKMethod.onGroupChanged, data));\n }\n );\n\n }\n\n @Override\n public void onAutoAcceptInvitationFromGroup(String groupId, String inviter, String inviteMessage) {\n EMListenerHandle.getInstance().addHandle(\n ()-> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"type\", \"onGroupAutoAcceptInvitation\");\n data.put(\"groupId\", groupId);\n data.put(\"inviter\", inviter);\n data.put(\"inviteMessage\", inviteMessage);\n post(() -> channel.invokeMethod(EMSDKMethod.onGroupChanged, data));\n }\n );\n\n }\n\n @Override\n public void onMuteListAdded(String groupId, List<String> mutes, long muteExpire) {\n EMListenerHandle.getInstance().addHandle(\n ()-> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"type\", \"onGroupMuteListAdded\");\n data.put(\"groupId\", groupId);\n data.put(\"mutes\", mutes);\n data.put(\"muteExpire\", muteExpire);\n post(() -> channel.invokeMethod(EMSDKMethod.onGroupChanged, data));\n }\n );\n\n }\n\n @Override\n public void onMuteListRemoved(String groupId, List<String> mutes) {\n EMListenerHandle.getInstance().addHandle(\n ()-> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"type\", \"onGroupMuteListRemoved\");\n data.put(\"groupId\", groupId);\n data.put(\"mutes\", mutes);\n post(() -> channel.invokeMethod(EMSDKMethod.onGroupChanged, data));\n }\n );\n }\n\n @Override\n public void onAdminAdded(String groupId, String administrator) {\n EMListenerHandle.getInstance().addHandle(\n ()-> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"type\", \"onGroupAdminAdded\");\n data.put(\"groupId\", groupId);\n data.put(\"administrator\", administrator);\n post(() -> channel.invokeMethod(EMSDKMethod.onGroupChanged, data));\n }\n );\n }\n\n @Override\n public void onAdminRemoved(String groupId, String administrator) {\n EMListenerHandle.getInstance().addHandle(\n ()-> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"type\", \"onGroupAdminRemoved\");\n data.put(\"groupId\", groupId);\n data.put(\"administrator\", administrator);\n post(() -> channel.invokeMethod(EMSDKMethod.onGroupChanged, data));\n }\n );\n }\n\n @Override\n public void onOwnerChanged(String groupId, String newOwner, String oldOwner) {\n EMListenerHandle.getInstance().addHandle(\n ()-> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"type\", \"onGroupOwnerChanged\");\n data.put(\"groupId\", groupId);\n data.put(\"newOwner\", newOwner);\n data.put(\"oldOwner\", oldOwner);\n post(() -> channel.invokeMethod(EMSDKMethod.onGroupChanged, data));\n }\n );\n }\n\n @Override\n public void onMemberJoined(String groupId, String member) {\n EMListenerHandle.getInstance().addHandle(\n ()-> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"type\", \"onGroupMemberJoined\");\n data.put(\"groupId\", groupId);\n data.put(\"member\", member);\n post(() -> channel.invokeMethod(EMSDKMethod.onGroupChanged, data));\n }\n );\n }\n\n @Override\n public void onMemberExited(String groupId, String member) {\n EMListenerHandle.getInstance().addHandle(\n ()-> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"type\", \"onGroupMemberExited\");\n data.put(\"groupId\", groupId);\n data.put(\"member\", member);\n 
post(() -> channel.invokeMethod(EMSDKMethod.onGroupChanged, data));\n }\n );\n }\n\n @Override\n public void onAnnouncementChanged(String groupId, String announcement) {\n EMListenerHandle.getInstance().addHandle(\n ()-> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"type\", \"onGroupAnnouncementChanged\");\n data.put(\"groupId\", groupId);\n data.put(\"announcement\", announcement);\n post(() -> channel.invokeMethod(EMSDKMethod.onGroupChanged, data));\n }\n );\n }\n\n @Override\n public void onSharedFileAdded(String groupId, EMMucSharedFile sharedFile) {\n EMListenerHandle.getInstance().addHandle(\n ()-> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"type\", \"onGroupSharedFileAdded\");\n data.put(\"groupId\", groupId);\n data.put(\"sharedFile\", EMMucSharedFileHelper.toJson(sharedFile));\n post(() -> channel.invokeMethod(EMSDKMethod.onGroupChanged, data));\n }\n );\n }\n\n @Override\n public void onSharedFileDeleted(String groupId, String fileId) {\n EMListenerHandle.getInstance().addHandle(\n ()-> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"type\", \"onGroupSharedFileDeleted\");\n data.put(\"groupId\", groupId);\n data.put(\"fileId\", fileId);\n post(() -> channel.invokeMethod(EMSDKMethod.onGroupChanged, data));\n }\n );\n }\n\n @Override\n public void onSpecificationChanged(EMGroup group) {\n EMListenerHandle.getInstance().addHandle(\n ()-> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"type\", \"onGroupSpecificationDidUpdate\");\n data.put(\"group\", EMGroupHelper.toJson(group));\n post(() -> channel.invokeMethod(EMSDKMethod.onGroupChanged, data));\n }\n );\n }\n\n @Override\n public void onStateChanged(EMGroup group, boolean isDisabled) {\n EMListenerHandle.getInstance().addHandle(\n ()-> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"type\", \"onGroupStateChanged\");\n data.put(\"groupId\", group.getGroupId());\n data.put(\"isDisabled\", isDisabled);\n post(() -> channel.invokeMethod(EMSDKMethod.onGroupChanged, data));\n }\n );\n }\n\n @Override\n public void onGroupMemberAttributeChanged(String groupId, String userId, Map<String, String> attribute, String from) {\n EMListenerHandle.getInstance().addHandle(\n ()-> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"type\", \"onGroupAttributesChangedOfMember\");\n data.put(\"groupId\", groupId);\n data.put(\"userId\", userId);\n if (from != null) {\n data.put(\"operatorId\", from);\n }\n if (attribute != null) {\n data.put(\"attributes\", attribute);\n }\n post(() -> channel.invokeMethod(EMSDKMethod.onGroupChanged, data));\n }\n );\n }\n };\n EMClient.getInstance().groupManager().addGroupChangeListener(groupChangeListener);\n }\n\n @Override\n public void unRegisterEaseListener() {\n EMClient.getInstance().groupManager().removeGroupChangeListener(groupChangeListener);\n }\n}" }, { "alpha_fraction": 0.661108672618866, "alphanum_fraction": 0.6612974405288696, "avg_line_length": 45.06666564941406, "blob_id": "81e91659b63f5b6df29f9d7a8d4f91a1498a4894", "content_id": "e0faf3c6f735b514a05ee1a6b5df0ebdf91bb44c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 15893, "license_type": "permissive", "max_line_length": 171, "num_lines": 345, "path": "/android/src/main/java/com/easemob/im_flutter_sdk/EMPushManagerWrapper.java", "repo_name": "easemob/im_flutter_sdk", "src_encoding": "UTF-8", "text": "package com.easemob.im_flutter_sdk;\n\nimport android.content.Context;\n\nimport 
com.hyphenate.chat.EMClient;\nimport com.hyphenate.chat.EMConversation;\nimport com.hyphenate.chat.EMGroup;\nimport com.hyphenate.chat.EMPushConfigs;\nimport com.hyphenate.chat.EMPushManager.DisplayStyle;\nimport com.hyphenate.chat.EMSilentModeParam;\nimport com.hyphenate.chat.EMSilentModeResult;\nimport com.hyphenate.exceptions.HyphenateException;\n\nimport org.json.JSONArray;\nimport org.json.JSONException;\nimport org.json.JSONObject;\n\nimport java.util.ArrayList;\nimport java.util.HashMap;\nimport java.util.Iterator;\nimport java.util.List;\nimport java.util.Map;\n\nimport io.flutter.embedding.engine.plugins.FlutterPlugin;\nimport io.flutter.plugin.common.MethodCall;\nimport io.flutter.plugin.common.MethodChannel.MethodCallHandler;\nimport io.flutter.plugin.common.MethodChannel.Result;\nimport io.flutter.plugin.common.PluginRegistry;\n\n\npublic class EMPushManagerWrapper extends EMWrapper implements MethodCallHandler {\n\n EMPushManagerWrapper(FlutterPlugin.FlutterPluginBinding flutterPluginBinding, String channelName) {\n super(flutterPluginBinding, channelName);\n }\n\n @Override\n public void onMethodCall(MethodCall call, Result result) {\n JSONObject param = (JSONObject)call.arguments;\n try {\n if (EMSDKMethod.getImPushConfig.equals(call.method)) {\n getImPushConfig(param, call.method, result);\n }\n else if(EMSDKMethod.getImPushConfigFromServer.equals(call.method)){\n getImPushConfigFromServer(param, call.method, result);\n }\n else if(EMSDKMethod.updatePushNickname.equals(call.method)){\n updatePushNickname(param, call.method, result);\n }\n else if(EMSDKMethod.updateImPushStyle.equals(call.method)){\n updateImPushStyle(param, call.method, result);\n }\n else if(EMSDKMethod.updateGroupPushService.equals(call.method)){\n updateGroupPushService(param, call.method, result);\n }\n else if(EMSDKMethod.updateHMSPushToken.equals(call.method)){\n updateHMSPushToken(param, call.method, result);\n }\n else if(EMSDKMethod.updateFCMPushToken.equals(call.method)){\n updateFCMPushToken(param, call.method, result);\n }\n else if (EMSDKMethod.enableOfflinePush.equals(call.method)) {\n enableOfflinePush(param, call.method, result);\n }\n else if (EMSDKMethod.disableOfflinePush.equals(call.method)){\n disableOfflinePush(param, call.method, result);\n }\n else if (EMSDKMethod.getNoPushGroups.equals(call.method)) {\n getNoPushGroups(param, call.method, result);\n }\n else if (EMSDKMethod.updateUserPushService.equals(call.method)) {\n updateUserPushService(param, call.method, result);\n }\n else if (EMSDKMethod.reportPushAction.equals(call.method)) {\n reportPushAction(param, call.method, result);\n }\n else if (EMSDKMethod.setConversationSilentMode.equals(call.method)) {\n setConversationSilentMode(param, call.method, result);\n }\n else if (EMSDKMethod.removeConversationSilentMode.equals(call.method)) {\n removeConversationSilentMode(param, call.method, result);\n }\n else if (EMSDKMethod.fetchConversationSilentMode.equals(call.method)) {\n fetchConversationSilentMode(param, call.method, result);\n }\n else if (EMSDKMethod.setSilentModeForAll.equals(call.method)) {\n setSilentModeForAll(param, call.method, result);\n }\n else if (EMSDKMethod.fetchSilentModeForAll.equals(call.method)) {\n fetchSilentModeForAll(param, call.method, result);\n }\n else if (EMSDKMethod.fetchSilentModeForConversations.equals(call.method)) {\n fetchSilentModeForConversations(param, call.method, result);\n }\n else if (EMSDKMethod.setPreferredNotificationLanguage.equals(call.method)) {\n 
setPreferredNotificationLanguage(param, call.method, result);\n }\n else if (EMSDKMethod.fetchPreferredNotificationLanguage.equals(call.method)) {\n fetchPreferredNotificationLanguage(param, call.method, result);\n }\n else if (EMSDKMethod.getPushTemplate.equals(call.method)) {\n getPushTemplate(param, call.method, result);\n }\n else if (EMSDKMethod.setPushTemplate.equals(call.method)) {\n setPushTemplate(param, call.method, result);\n }\n else {\n super.onMethodCall(call, result);\n }\n }catch (JSONException e) {\n\n }\n }\n\n private void getImPushConfig(JSONObject params, String channelName, Result result) throws JSONException {\n asyncRunnable(()->{\n EMPushConfigs configs = EMClient.getInstance().pushManager().getPushConfigs();\n onSuccess(result, channelName, EMPushConfigsHelper.toJson(configs));\n });\n\n }\n\n private void getImPushConfigFromServer(JSONObject params, String channelName, Result result) throws JSONException {\n asyncRunnable(()->{\n try {\n EMPushConfigs configs = EMClient.getInstance().pushManager().getPushConfigsFromServer();\n onSuccess(result, channelName, EMPushConfigsHelper.toJson(configs));\n } catch (HyphenateException e) {\n onError(result, e);\n }\n });\n }\n\n private void updatePushNickname(JSONObject params, String channelName, Result result) throws JSONException {\n String nickname = params.getString(\"nickname\");\n\n asyncRunnable(()->{\n try {\n EMClient.getInstance().pushManager().updatePushNickname(nickname);\n onSuccess(result, channelName, nickname);\n } catch (HyphenateException e) {\n onError(result, e);\n }\n });\n }\n\n\n private void enableOfflinePush(JSONObject params, String channelName, Result result) throws JSONException\n {\n asyncRunnable(()-> {\n try {\n EMClient.getInstance().pushManager().enableOfflinePush();\n onSuccess(result, channelName, null);\n } catch(HyphenateException e) {\n onError(result, e);\n }\n });\n }\n\n private void disableOfflinePush(JSONObject params, String channelName, Result result) throws JSONException\n {\n int startTime = params.getInt(\"start\");\n int endTime = params.getInt(\"end\");\n asyncRunnable(()-> {\n try {\n EMClient.getInstance().pushManager().disableOfflinePush(startTime, endTime);\n onSuccess(result, channelName, null);\n } catch(HyphenateException e) {\n onError(result, e);\n }\n });\n }\n\n private void getNoPushGroups(JSONObject params, String channelName, Result result) throws JSONException {\n asyncRunnable(()-> {\n List<String> groups = EMClient.getInstance().pushManager().getNoPushGroups();\n onSuccess(result, channelName, groups);\n });\n }\n\n private void getNoPushUsers(JSONObject params, String channelName, Result result) throws JSONException {\n asyncRunnable(()->{\n List<String> list = EMClient.getInstance().pushManager().getNoPushUsers();\n onSuccess(result, channelName, list);\n });\n }\n\n private void updateImPushStyle(JSONObject params, String channelName, Result result) throws JSONException {\n DisplayStyle style = params.getInt(\"pushStyle\") == 0 ? 
DisplayStyle.SimpleBanner : DisplayStyle.MessageSummary;\n EMClient.getInstance().pushManager().asyncUpdatePushDisplayStyle(style, new EMWrapperCallBack(result, channelName, true));\n }\n\n private void updateGroupPushService(JSONObject params, String channelName, Result result) throws JSONException {\n JSONArray groupIds = params.getJSONArray(\"group_ids\");\n boolean noPush = params.getBoolean(\"noPush\");\n\n List<String> groupList = new ArrayList<>();\n for (int i = 0; i < groupIds.length(); i++) {\n String groupId = groupIds.getString(i);\n groupList.add(groupId);\n }\n asyncRunnable(()-> {\n try {\n EMClient.getInstance().pushManager().updatePushServiceForGroup(groupList, noPush);\n onSuccess(result, channelName, null);\n } catch(HyphenateException e) {\n onError(result, e);\n }\n });\n }\n\n private void updateUserPushService(JSONObject params, String channelName, Result result) throws JSONException {\n JSONArray groupIds = params.getJSONArray(\"user_ids\");\n boolean noPush = params.getBoolean(\"noPush\");\n\n List<String> userList = new ArrayList<>();\n for (int i = 0; i < groupIds.length(); i++) {\n String userId = groupIds.getString(i);\n userList.add(userId);\n }\n asyncRunnable(()-> {\n try {\n EMClient.getInstance().pushManager().updatePushServiceForUsers(userList, noPush);\n onSuccess(result, channelName, null);\n } catch(HyphenateException e) {\n onError(result, e);\n }\n });\n }\n\n private void updateHMSPushToken(JSONObject params, String channelName, Result result) throws JSONException {\n String token = params.getString(\"token\");\n asyncRunnable(()->{\n EMClient.getInstance().sendHMSPushTokenToServer(token);\n onSuccess(result, channelName, token);\n });\n }\n\n private void updateFCMPushToken(JSONObject params, String channelName, Result result) throws JSONException {\n String token = params.getString(\"token\");\n String fcmKey = EMClient.getInstance().getOptions().getPushConfig().getFcmSenderId();\n EMClient.getInstance().pushManager().bindDeviceToken(fcmKey, token, new EMWrapperCallBack(result, channelName, null));\n }\n\n private void reportPushAction(JSONObject params, String channelName, Result result) throws JSONException {\n\n }\n\n private void setConversationSilentMode(JSONObject params, String channelName, Result result) throws JSONException {\n String conversationId = params.getString(\"conversationId\");\n EMConversation.EMConversationType type = EMConversationHelper.typeFromInt(params.getInt(\"conversationType\"));\n EMSilentModeParam param = EMSilentModeParamHelper.fromJson(params.getJSONObject(\"param\"));\n EMClient.getInstance().pushManager().setSilentModeForConversation(conversationId, type, param, new EMValueWrapperCallBack<EMSilentModeResult>(result, channelName){\n @Override\n public void onSuccess(EMSilentModeResult object) {\n super.updateObject(null);\n }\n });\n }\n private void removeConversationSilentMode(JSONObject params, String channelName, Result result) throws JSONException {\n String conversationId = params.getString(\"conversationId\");\n EMConversation.EMConversationType type = EMConversationHelper.typeFromInt(params.getInt(\"conversationType\"));\n EMClient.getInstance().pushManager().clearRemindTypeForConversation(conversationId, type, new EMWrapperCallBack(result, channelName, null));\n }\n\n private void fetchConversationSilentMode(JSONObject params, String channelName, Result result) throws JSONException {\n String conversationId = params.getString(\"conversationId\");\n EMConversation.EMConversationType type = 
EMConversationHelper.typeFromInt(params.getInt(\"conversationType\"));\n EMClient.getInstance().pushManager().getSilentModeForConversation(conversationId, type, new EMValueWrapperCallBack<EMSilentModeResult>(result, channelName){\n @Override\n public void onSuccess(EMSilentModeResult object) {\n super.updateObject(EMSilentModeResultHelper.toJson(object));\n }\n });\n }\n\n private void setSilentModeForAll(JSONObject params, String channelName, Result result) throws JSONException {\n EMSilentModeParam param = EMSilentModeParamHelper.fromJson(params.getJSONObject(\"param\"));\n EMClient.getInstance().pushManager().setSilentModeForAll(param ,new EMValueWrapperCallBack<EMSilentModeResult>(result, channelName){\n @Override\n public void onSuccess(EMSilentModeResult object) {\n super.updateObject(null);\n }\n });\n }\n\n private void fetchSilentModeForAll(JSONObject params, String channelName, Result result) throws JSONException {\n EMClient.getInstance().pushManager().getSilentModeForAll(new EMValueWrapperCallBack<EMSilentModeResult>(result, channelName){\n @Override\n public void onSuccess(EMSilentModeResult object) {\n super.updateObject(EMSilentModeResultHelper.toJson(object));\n }\n });\n }\n private void fetchSilentModeForConversations(JSONObject params, String channelName, Result result) throws JSONException {\n Iterator iterator = params.keys();\n ArrayList<EMConversation> list = new ArrayList<>();\n while (iterator.hasNext()) {\n String conversationId = (String)iterator.next();\n EMConversation.EMConversationType type = EMConversationHelper.typeFromInt(params.getInt(conversationId));\n EMConversation conversation = EMClient.getInstance().chatManager().getConversation(conversationId, type, true);\n list.add(conversation);\n }\n\n EMClient.getInstance().pushManager().getSilentModeForConversations(list, new EMValueWrapperCallBack<Map<String, EMSilentModeResult>>(result, channelName) {\n @Override\n public void onSuccess(Map<String, EMSilentModeResult> object) {\n Map<String ,Map> result = new HashMap<>();\n for (Map.Entry<String, EMSilentModeResult>entry: object.entrySet()) {\n result.put(entry.getKey(), EMSilentModeResultHelper.toJson(entry.getValue()));\n }\n super.updateObject(result);\n }\n });\n\n }\n\n private void setPreferredNotificationLanguage(JSONObject params, String channelName, Result result) throws JSONException {\n String code = params.getString(\"code\");\n EMClient.getInstance().pushManager().setPreferredNotificationLanguage(code, new EMWrapperCallBack(result, channelName, null));\n }\n\n private void fetchPreferredNotificationLanguage(JSONObject params, String channelName, Result result) throws JSONException {\n EMClient.getInstance().pushManager().getPreferredNotificationLanguage(new EMValueWrapperCallBack<String>(result, channelName){\n @Override\n public void onSuccess(String object) {\n super.onSuccess(object);\n }\n });\n }\n\n private void setPushTemplate(JSONObject params, String channelName, Result result) throws JSONException {\n String pushTemplateName = params.getString(\"pushTemplateName\");\n EMClient.getInstance().pushManager().setPushTemplate(pushTemplateName, new EMWrapperCallBack(result, channelName, null));\n }\n\n private void getPushTemplate(JSONObject params, String channelName, Result result) throws JSONException {\n EMClient.getInstance().pushManager().getPushTemplate(new EMValueWrapperCallBack<String>(result, channelName) {\n @Override\n public void onSuccess(String object) {\n super.onSuccess(object);\n }\n });\n }\n}\n" }, { "alpha_fraction": 
0.717841625213623, "alphanum_fraction": 0.7472435832023621, "avg_line_length": 18.185897827148438, "blob_id": "84646db560fd3155c985b5d09b575c779a458736", "content_id": "ec8bda0d4a59d65435102be9d27195b8fa4cc892", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 9646, "license_type": "permissive", "max_line_length": 121, "num_lines": 312, "path": "/CHANGELOG.md", "repo_name": "easemob/im_flutter_sdk", "src_encoding": "UTF-8", "text": "## 4.1.0\n\n#### 新增:\n- 增加 `EMOptions#osType`属性和`EMOptions#deviceName`属性,用户设置设备类型和设备名称;\n- 增加 `Combine` 消息类型,用于合并转发消息;\n- 增加 `EMChatManager#fetchCombineMessageDetail` 方法;\n- 增加 `EMChatManager#modifyMessage` 方法用户修改已发送的消息,目前只支持文本消息消息;\n- 增加 `EMChatEventHandler#onMessageContentChanged` 回调,用户监听消息编辑实现;\n- 增加 `EMClient#fetchLoggedInDevices` 方法,可是使用token获取已登录的设备列表;\n- 增加 `EMClient#kickDevice` 方法,可以使用 token 踢掉指定设备;\n- 增加 `EMClient#kickAllDevices` 方法,可以使用 token 踢掉所有已登录设备;\n- 增加 `EMChatManager#fetchConversation` 方法,获取服务器会话列表,原方法 `EMChatManager#getConversationsFromServer` 作废;\n- 增加 `EMChatManager#pinConversation` 方法,实现在服务器会话列表中 置顶/取消置顶 会话;\n- 增加 `hatManager#fetchPinnedConversations` 方法,从服务器获取已置顶会话;\n- 增加 `EMMessage#receiverList` 属性,用于在群组/聊天室中发送定向消息;\n\n#### 修复:\n- 修复 ios 中无法收到 `EMConnectionEventHandler#onConnected` 和 `EMConnectionEventHandler#onDisconnected` 的问题;\n- 修复 安卓消息中,发送方`attributes` 中包含string类型,接收方变为int类型的问题;\n\n#### 优化:\n- 离开聊天室 `EMChatRoomEventHandler#onRemovedFromChatRoom` 回调中增加离开原因;\n- 被其他设备踢下线 `EMConnectionEventHandler#onUserDidLoginFromOtherDevice` 回调中增加操作人deviceName;\n\n\n## 4.0.2\n\n#### 新增:\n- 增加 `EMGroupManager#setMemberAttributes` 方法,用于设置群成员属性;\n- 增加 `EMGroupManager#fetchMemberAttributes` 和 `GroupManager#fetchMembersAttributes` 方法用户获取群成员属性;\n- 增加 `EMGroupEventHandler#onAttributesChangedOfGroupMember` 群成员属性变更回调;\n- 增加 `EMChatManager#fetchHistoryMessagesByOption` 方法;\n- 增加 `EMConversation#deleteMessagesWithTs` 方法;\n- 增加 `EMMessage#deliverOnlineOnly` 属性用于设置只向在线用户投递消息;\n\n#### 修复:\n- 修复安卓 hot reload 后回调多次的问题;\n- 修复iOS 获取聊天室属性key传null导致的崩溃问题;\n\n#### 优化:\n- 为`ChatManager#fetchHistoryMessages` 方法增加获取方向;\n\n## 4.0.0+7\n\n#### 修复\n- 修复初始化无返回的问题。\n\n## 4.0.0+6\n\n#### 修复\n- 修复下载附件结束后状态不准确的问题。\n\n## 4.0.0+7\n- 修复初始化问题。\n\n## 4.0.0+6\n- 修复下载附件结束后状态不准确的问题。\n\n## 4.0.0+5\n\n#### 修复\n- 修复下载附件回调不执行。\n\n## 4.0.0+4\n\n#### 修复\n- 安卓构建视频消息崩溃的问题。\n\n## 4.0.0+3\n\n#### 修复\n- 安卓 `onRemovedFromChatRoom` 不回调。\n\n## 4.0.0+2\n\n#### 修复\n\n- 修复List<String>? 转换失败;\n- 修复图片消息和视频消息转换失败;\n\n## 4.0.0\n\n#### 新增特性\n\n- 依赖的原生平台 `iOS` 和 `Android` 的 SDK 升级为 v4.0.0 版本。\n- 新增 `EMChatManager#fetchConversationListFromServer` 方法实现从服务器分页获取会话列表。\n- 新增 `EMMessage#chatroomMessagePriority` 属性实现聊天室消息优先级功能,确保高优先级消息优先处理。\n\n#### 优化\n\n修改发送消息结果的回调由 `EMMessage#setMessageStatusCallBack` 修改为 `EMChatManager#addMessageEvent`。\n\n#### 修复\n\n修复 `EMChatManager#deleteMessagesBeforeTimestamp` 执行失败的问题。\n\n# 3.9.9+1\n修复:\n1. 修复ios群已读回执不执行;\n\n新增:\n1. 增加会话根据时间删除服务器漫游消息api `EMConversation#removeServerMessageBeforeTimeStamp(timestamp)`。\n\n# 3.9.9\n修复:\n1.修复极端情况下 SDK 崩溃的问题。\n\n## 3.9.7+4\n修复:\n1. 安卓不执行onGroupDestroyed回调;\n2. 构造位置消息时无法设置buildingName;\n\n## 3.9.7+3\n修复:\n1. 安卓不会执行 onAutoAcceptInvitationFromGroup 回调;\n\n## 3.9.7+2\n\n修复:\n1. 修复 StartCallback() 不会回调的问题;\n2. 修复 iOS 根据时间获取消息失败的问题;\n\n## 3.9.7+1\n\n修复:\n 1. 修复 安卓 fcm send id偶现为空的问题;\n 2. 修复 安卓 `SilentModeResult` expireTs 为空的问题;\n\n## 3.9.7\n\n新增特性:\n 1. 新增聊天室自定义属性功能。\n 2. 新增 `areaCode` 方法限制连接边缘节点的范围。\n 3. 
`EMGroup` 中增加 `isDisabled` 属性显示群组禁用状态,需要开发者在服务端设置。该属性在调用 `EMGroupManager` 中的 `fetchGroupInfoFromServer` 方法获取群组详情时返回。\n\n优化:\n 1. 移除 SDK 一部分冗余日志。\n\n修复\n 1. 修复极少数场景下,从服务器获取较大数量的消息时失败的问题。\n 2. 修复数据统计不正确的问题。\n 3. 修复极少数场景下打印日志导致的崩溃。\n\n## 3.9.5\n\n- 将 AddManagerListener 方法标为过期;\n- 增加 customEventHandler;\n- 添加 EventHandler;\n- 增加 PushTemplate 方法;\n- 增加 Group isDisabled 属性;\n- 增加 PushConfigs displayName 属性;\n- 修改 Api referances;\n- 升级原生依赖为 3.9.5\n\n## 3.9.4+3\n\n- 修复 安卓端 `loadAllConversations` crash.\n\n## 3.9.4+2\n\n- 修复 `EMClient.getInstance.startCallback()` 执行时安卓偶现崩溃;\n\n## 3.9.4+1\n- 增加ChatSilentMode;\n\n## 3.9.4\n- 移除过期Api;\n\n## 3.9.3\n- 新增thread实现;\n- 修复部分bug;\n- 依赖原生sdk版本为3.9.3\n\n## 3.9.2\n- 增加Reaction实现;\n- 增加举报功能;\n- 增加获取群组已读api;\n- 添加下载群文件进度回调;\n- 修复下载视频偶现失败;\n- 修复获取群免打扰详情失败;\n- 修复 startCallback是 ios 偶现 crash;\n\n\n\n## 3.9.1\n- 增加 用户在线状态 (Presence) 订阅功能;\n- 增加 翻译 功能更新,增加自动翻译接口。用户可以按需翻译,和发消息自动翻译。\n\n## 3.9.0+2\n\n- 修改用户退出/离线回调;\n - EMConnectionListener#onConnected: 长连接恢复;\n - EMConnectionListener#onDisconnected: 长连接断开;\n - EMConnectionListener#onUserDidLoginFromOtherDevice: 当前账号在其他设备登录;\n - EMConnectionListener#onUserDidRemoveFromServer: 当前账号被服务器删除;\n - EMConnectionListener#onUserDidForbidByServer: 当前账号登录被服务器拒绝;\n - EMConnectionListener#onUserDidChangePassword: 当前账号密码变更;\n - EMConnectionListener#onUserDidLoginTooManyDevice: 当前账号登录太多设备;\n - EMConnectionListener#onUserKickedByOtherDevice: 当前账号被登录的其他设备设置下线;\n - EMConnectionListener#onUserAuthenticationFailed: 当前账号鉴权失败;\n- 依赖原生sdk版本为3.9.2.1;\n- 修复ios group ack 问题;\n\n## 3.9.0+1\n\n- 修复message.attribute不准;\n\n- 增加 EMClient.getInstance.startCallback() 方法\n \n ```dart\n EMClient.getInstance.startCallback();\n ```\n \n 只有调用该方法后,`EMContactManagerListener`、 `EMGroupEventListener` 、 `EMChatRoomEventListener` 回调才会开始执行;\n\n- 修复删除聊天室白名单成员失败;\n\n## 3.9.0\n\n- 增加单人推送免打扰接口;\n\n- 增加api referance;\n\n- 增加renewToken api;\n\n- 修改消息callback方式;\n\n- iOS移除自动绑定deviceToken,如需使用,需要在iOS端单独增加;\n\n- android移除多余权限;\n\n- 修改已知bug;\n\n## 3.8.9\n\n- 增加单聊消息免打扰;\n- 去除不必要的信息收集;\n- 修复安卓某些场景下数据库损坏导致崩溃;\n- 移除对FCM11.4.0的依赖;\n- 修复安卓WAKE_LOCK权限导致的崩溃;\n- 增加用户被全局禁言时发消息错误码;\n- 增强数据传输安全性;\n- 增强本地数据存储安全性;\n- 新增使用Token登录时,Token过期的回调;\n- 修复拉取历史漫游消息不全的bug;\n- 默认使用https;\n- 优化登录速度;\n\n## 3.8.3+9\n\n- 将设置推送相关操作从EMPushConfigs中移到EMPushManager中;\n- 修复已知bug;\n\n## 3.8.3+8\n\n- 修复ios使用token登录失败;\n- 修改Login方法和Logout方法返回值;\n\n## 3.8.3+6\n\n- 修改EMImPushConfig为EMPushConfigs;\n- 删除EMOptions中的EMPushConfig.设置推送证书时直接调用EMOptions即可;\n- EMGroup中移除ShareFiles,如果需要获取共享文件,请调用Api:\n `EMClient.getInstance.groupManager.getGroupFileListFromServer(groupId)` \n- 将isConnected和isLoginBefore、Token改为从原生获取;\n- 修复安卓设置群组免打扰失效的问题;\n- 修复获取公开群crash的问题;\n- 修改throw error的逻辑;\n- 修改构造文本消息时的方法,需要传入参数名;\n- 修改部分原生方法逻辑;\n- 调整项目目录结构;\n- 将`onConversationRead`回调方法参数改为必选;\n- \n\n## 3.8.3+5\n\n- 更新安卓依赖原生sdk版本;\n- 修复获取本地群组crash;\n\n## 3.8.3+4\n\n* 修复消息attribute类型变为bool类型;\n* 修复群组免打扰属性不准;\n* 修复ios importMessages方法bug;\n* 修复群、聊天室禁言时不执行回调的bug;\n* 修复下载方法不执行callback;\n* 构造文件消息提供设置文件大小属性;\n* 修改`EMGroupChangeListener` 为 `EMGroupEventListener`\n\n## 3.8.3+3\n\n* 修复安卓下resendMessage方法发送失败时不回调onError;\n* 修复fetchChatRoomMembers返回类型错误;\n\n## 3.8.3+2\n\n* 增加群组已读回执;\n* 不在提供EMContact类,直接返回String类型username;\n\n## 3.8.3\n\n* 增加用户属性;\n* 修复已知bug;\n\n## 1.0.0\n\n* 用户管理;\n* 群组管理;\n* 聊天室管理;\n* 会话管理;\n* 通讯录管理;\n* 推送管理;\n" }, { "alpha_fraction": 0.6391191482543945, "alphanum_fraction": 0.6391191482543945, "avg_line_length": 23.131250381469727, "blob_id": "2213f5a3582110d6382caad3b6bf5083855b607d", "content_id": "db961ecb7bca72ba4f5771711ac1ec05a8b5cce9", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3860, "license_type": "permissive", "max_line_length": 95, "num_lines": 160, "path": "/android/src/main/java/com/easemob/im_flutter_sdk/ImFlutterSdkPlugin.java", "repo_name": "easemob/im_flutter_sdk", "src_encoding": "UTF-8", "text": "package com.easemob.im_flutter_sdk;\n\nimport android.os.Handler;\nimport android.os.Looper;\n\nimport com.hyphenate.EMCallBack;\nimport com.hyphenate.EMValueCallBack;\nimport com.hyphenate.exceptions.HyphenateException;\nimport com.hyphenate.util.EMLog;\n\nimport io.flutter.embedding.engine.plugins.FlutterPlugin;\nimport io.flutter.plugin.common.MethodCall;\nimport io.flutter.plugin.common.MethodChannel;\nimport io.flutter.plugin.common.PluginRegistry.Registrar;\nimport io.flutter.plugin.common.MethodChannel.Result;\n\nimport java.util.HashMap;\nimport java.util.Map;\n\n\n/**\n * ImFlutterSdkPlugin\n */\npublic class ImFlutterSdkPlugin implements FlutterPlugin {\n\n static final Handler handler = new Handler(Looper.getMainLooper());\n\n EMClientWrapper clientWrapper;\n\n public ImFlutterSdkPlugin() {\n }\n\n public void sendDataToFlutter(final Map data) {\n if (clientWrapper != null) {\n clientWrapper.sendDataToFlutter(data);\n }\n }\n\n @Override\n public void onAttachedToEngine(FlutterPlugin.FlutterPluginBinding flutterPluginBinding) {\n clientWrapper = new EMClientWrapper(flutterPluginBinding, \"chat_client\");\n }\n\n @Override\n public void onDetachedFromEngine(FlutterPlugin.FlutterPluginBinding flutterPluginBinding) {\n clientWrapper.unRegisterEaseListener();\n }\n}\n\n\nclass EMWrapperCallBack implements EMCallBack {\n\n EMWrapperCallBack(Result result, String channelName, Object object) {\n this.result = result;\n this.channelName = channelName;\n this.object = object;\n }\n\n Result result;\n String channelName;\n Object object;\n\n void post(Runnable runnable) {\n ImFlutterSdkPlugin.handler.post(runnable);\n }\n\n @Override\n public void onSuccess() {\n post(() -> {\n Map<String, Object> data = new HashMap<>();\n if (object != null) {\n data.put(channelName, object);\n }\n result.success(data);\n });\n }\n\n @Override\n public void onError(int code, String desc) {\n post(() -> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"error\", EMErrorHelper.toJson(code, desc));\n EMLog.e(\"callback\", desc);\n result.success(data);\n });\n }\n\n @Override\n public void onProgress(int progress, String status) {\n // no need\n }\n}\n\nclass EMDownloadCallback implements EMCallBack {\n\n EMDownloadCallback(String fileId, String savePath) {\n this.fileId = fileId;\n this.savePath = savePath;\n }\n String savePath;\n String fileId;\n\n\n @Override\n public void onSuccess() {\n\n }\n\n @Override\n public void onError(int code, String error) {\n\n }\n\n @Override\n public void onProgress(int progress, String status) {\n\n }\n}\n\n\nclass EMValueWrapperCallBack<T> implements EMValueCallBack<T> {\n\n EMValueWrapperCallBack(MethodChannel.Result result, String channelName)\n {\n this.result = result;\n this.channelName = channelName;\n }\n\n private MethodChannel.Result result;\n private String channelName;\n\n public void post(Runnable runnable) {\n ImFlutterSdkPlugin.handler.post(runnable);\n }\n\n @Override\n public void onSuccess(T object) {\n updateObject(object);\n }\n\n @Override\n public void onError(int code, String desc) {\n post(() -> {\n Map<String, Object> data = new HashMap<>();\n data.put(\"error\", EMErrorHelper.toJson(code, 
desc));\n EMLog.e(\"callback\", \"onError\");\n result.success(data);\n });\n }\n\n public void updateObject(Object object) {\n post(()-> {\n Map<String, Object> data = new HashMap<>();\n if (object != null) {\n data.put(channelName, object);\n }\n result.success(data);\n });\n }\n}" }, { "alpha_fraction": 0.5626690983772278, "alphanum_fraction": 0.5626690983772278, "avg_line_length": 19.537036895751953, "blob_id": "90ef45dc7a8e49294433245c7655a27a6de0623e", "content_id": "20cdc8bb0ac920a5efd092bb412916f7c2cadba8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1109, "license_type": "permissive", "max_line_length": 50, "num_lines": 54, "path": "/android/src/main/java/com/easemob/im_flutter_sdk/EMListenerHandle.java", "repo_name": "easemob/im_flutter_sdk", "src_encoding": "UTF-8", "text": "package com.easemob.im_flutter_sdk;\n\nimport java.util.ArrayList;\nimport java.util.List;\n\npublic class EMListenerHandle {\n\n static private EMListenerHandle handle;\n\n private List<Runnable> emActionHandle;\n\n private boolean hasReady;\n\n\n public static EMListenerHandle getInstance() {\n if (handle == null) {\n handle = new EMListenerHandle();\n }\n return handle;\n }\n\n private EMListenerHandle(){\n emActionHandle = new ArrayList<>();\n }\n\n void addHandle(Runnable runnable) {\n emActionHandle.add(runnable);\n if (hasReady) {\n runHandle();\n }\n }\n\n void runHandle() {\n synchronized (emActionHandle){\n List<Runnable> tmp = emActionHandle;\n for (Runnable action : tmp) {\n action.run();\n }\n emActionHandle.clear();\n }\n }\n\n void startCallback(){\n hasReady = true;\n runHandle();\n }\n\n void clearHandle(){\n hasReady = false;\n synchronized (emActionHandle) {\n emActionHandle.clear();\n }\n }\n}\n" } ]
15
Angelinaa/KOBE
https://github.com/Angelinaa/KOBE
3570077589fa8e3e2def03747b7ab5a4055374eb
4d25487051e2791a977e59297f70a25e51806466
61a29d8b16e3f8825cabf5e378f81656cfa25123
refs/heads/master
2020-06-29T10:09:26.376633
2019-08-03T15:18:19
2019-08-03T15:18:19
200,507,811
0
1
null
2019-08-04T15:13:41
2019-08-03T15:18:53
2019-08-03T15:18:51
null
[ { "alpha_fraction": 0.6869301199913025, "alphanum_fraction": 0.6990881562232971, "avg_line_length": 22.5, "blob_id": "cdbe099405b73364a5abf56758a55cd284ce8440", "content_id": "cbf16f34023447a2d647630ce7d2e738fa9bd260", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 333, "license_type": "permissive", "max_line_length": 55, "num_lines": 14, "path": "/webspider.py", "repo_name": "Angelinaa/KOBE", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup\nimport requests\nfrom urllib.request import urlopen\nimport re\nimport random\n\n\nbase_url = \"https://baike.baidu.com/item/\"\nkey = [\"测试\"]\nurl = base_url + key[-1]\nprint(url)\nhtml = requests.get(url).text\nsoup = BeautifulSoup(html, features='lxml')\nprint(soup.find('h1').get_text(), ' url: ', key[-1])\n" }, { "alpha_fraction": 0.5180505514144897, "alphanum_fraction": 0.5324909687042236, "avg_line_length": 26.04901885986328, "blob_id": "1735bfe5bf6c402fb00a1d94659e67ed285fb2d3", "content_id": "661e37230353e1660072f3cc53d9ebc3fcc4f1f8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3040, "license_type": "permissive", "max_line_length": 194, "num_lines": 102, "path": "/core/input2output.py", "repo_name": "Angelinaa/KOBE", "src_encoding": "UTF-8", "text": "import time\nimport random\nimport utils\nimport re\n#from api_a import *\n\nLOWER=0\nCHAR=0\n\nprint('*'*5+\"Model Loading...\"+'*'*5)\n#model = DescriptionGenerator(\n# config=\"configs/eval.yaml\",\n# gpu=\"0\",\n# restore=False,\n# pretrain=\"experiments/aspect-user/best_bleu_checkpoint.pt\",\n# mode=\"eval\",\n# batch_size=1,\n# beam_size=10,\n# # refactor issue; workaround; delete afterwards:\n# scale=1,\n# char=False,\n# use_cuda=True,\n# seed=1234,\n# model=\"tensor2tensor\",\n# num_workers=0\n# )\ndicts = {}\ndicts['src'] = utils.Dict(data='./dataloading/src.dict', lower=LOWER)\n\nprint('*'*5+\"欢迎使用爱文案AI文案生成服务\"+'*'*5)\nkey=''\nwhile (key!='quit'):\n key=''\n inputstr = ''\n aspect=''\n srcIds=[]\n srcWords=[]\n\n key = input(\"请输入关键词,以空格分开。\\n>>>\")\n if(key=='quit'):\n break\n keystr = key.replace(' ','')\n for char in keystr:\n inputstr = inputstr + char + \" \"\n inputstr = inputstr[:-1]\n\n aspect = input(\"请选择生成风格:\\n a. Appearance\\n b. Texture\\n c. Function\\n>>>\")\n assert bool(re.match(r'[abc]', aspect))\n \n inputstr = '<'+str(random.randint(0,35))+'> '+'<'+aspect+'> '+inputstr\n # print('\\n'+inputstr)\n length = input(\"请输入生成长度: \\n a. 短\\n b. 中\\n c. 长\\n>>>\")\n assert bool(re.match(r'[abc]', length)) \n \n if length=='a':\n lenlimit = 60\n elif length=='b':\n lenlimit = 110\n elif length=='c':\n lenlimit = 9999\n \n inputstr = inputstr.strip()\n if LOWER:\n inputstr = inputstr.lower()\n\n srcWords = inputstr.split() if not CHAR else list(inputstr)\n # print(srcWords)\n\n\n srcIds = dicts['src'].convertToIdx(srcWords, utils.UNK_WORD)\n # srcIdStr=(\" \".join(list(map(str, srcIds))))\n \n start = time.time()\n output = \"\".join(model.predict(srcIds))\n# output = \"90 后 潮 男 原 创 酷 帅 潮 流 高 街 t 恤 , 舒 适 柔 软 , 亲 肤 不 起 球 , 清 新 宽 松 , 是 个 不 怎 么 挑 人 的 版 型 , 简 约 而 不 简 单 , 上 身 效 果 极 好 , 穿 上 就 是 一 个 阳 光 大 男 孩 , 但 不 失 稳 重 , 休 闲 运 动 都 可 以\".replace(' ','')\n# output = re.split(\"[,.!? ,。!? 
]+\", output)\n# outputstr=\"\"\n# i=0\n# while(len(outputstr)<=lenlimit and i<len(output)):\n# print(len(outputstr))\n# outputstr = outputstr+output[i]+','\n# i=i+1\n \n # cutting length\n pos=lenlimit\n outputstr = output[:lenlimit+1]\n while not bool(re.match(r'[,.!? ,。!?]', output[pos])):\n pos += 1\n if pos>=len(output):\n break\n outputstr = outputstr+output[pos]\n print(outputstr[-1])\n \n if outputstr[-1] == ',':\n outputstr = outputstr[:-1]\n if outputstr[-1] != '。':\n outputstr +='。'\n \n \n print(outputstr)\n duration = time.time() - start\n print(\"Time Spent: \", duration,'\\n')\n\n \n \n" } ]
2
OuchattiKhadija/ImageProcessor
https://github.com/OuchattiKhadija/ImageProcessor
d8d44caf689fbbcd2a420692a7afe06021b3cb67
ff25f1d79d358157cf3913f40a268d1d303e3141
0e84ddb3ec8ccdaba74ac7864257ca9779372e38
refs/heads/master
2022-12-26T18:24:05.076288
2020-10-04T17:31:50
2020-10-04T17:31:50
301,186,857
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.649856448173523, "alphanum_fraction": 0.6779424548149109, "avg_line_length": 28.429224014282227, "blob_id": "cc6b2b47b05a54738a0e6648c5cab177cee7706f", "content_id": "871723f8a03c03e2b434549ef28c805ef2c37cfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12889, "license_type": "no_license", "max_line_length": 104, "num_lines": 438, "path": "/main.py", "repo_name": "OuchattiKhadija/ImageProcessor", "src_encoding": "UTF-8", "text": "\"\"\"\nCreated on Thu Jan 23 22:24:07 2020\n@author: imran\n\"\"\"\n\nfrom tkinter import * \nfrom tkinter.filedialog import *\nfrom tkinter import colorchooser\nfrom tkinter import ttk\nfrom PIL import Image, ImageTk, ImageDraw\n#local class\nfrom filtres import Filtre\n\n\n# global variables:\nx1 = 0\ny1 = 0\nx2 = 0\ny2 = 0\ncommand = \"Draw\"\ncolor = \"black\"\n\ndef coords_reset():\n global x1\n global y1\n global x2\n global y2\n x1 = 0\n y1 = 0\n x2 = 0\n y2 = 0\n \ndef canvas_set(image):\n global canvas\n photo = ImageTk.PhotoImage(image)\n canvas.config(width=image.width, height=image.height)\n canvas.image= photo\n canvas.create_image(0, 0, anchor=NW, image=photo)\n \ndef about():\n messagebox.showinfo(\"A propos de\", \"Image processor 1.0 - Created by Imran YAZIDI & Khadija OUCHATTI\")\n \ndef newimg():\n global image\n global draw\n image = Image.new(\"RGB\", (500, 400), \"white\")\n draw = ImageDraw.Draw(image)\n photo = ImageTk.PhotoImage(image)\n canvas2.config(width=image.width, height=image.height)\n canvas2.image= photo\n canvas2.create_image(0, 0, anchor=NW, image=photo)\n canvas_set(image)\n \ndef openimg():\n global filepath\n filepath = askopenfilename(title=\"Ouvrir une image\",filetypes=[('all files','.*')])\n global image\n global draw\n image = Image.open(filepath)\n draw = ImageDraw.Draw(image)\n global canvas2\n photo = ImageTk.PhotoImage(image)\n canvas2.config(width=image.width, height=image.height)\n canvas2.image= photo\n canvas2.create_image(0, 0, anchor=NW, image=photo)\n canvas_set(image)\n\ndef saveimgas():\n global image\n f = asksaveasfilename(filetypes=[('Png files', '.png'),('All files', '*')], defaultextension=\".png\")\n image.save(f,\"PNG\")\n \n# Image filters\n \ndef imgbin():\n global image\n image=Filtre(image).binary(x1,y1,x2,y2)\n canvas_set(image)\n \ndef imggraylevel():\n global image\n image=Filtre(image).graylevel()\n canvas_set(image)\n \ndef imgbrightnessplus():\n global image\n image=Filtre(image).brightness(50)\n canvas_set(image)\n \ndef imgbrightnessmoins():\n global image\n image=Filtre(image).brightness(-50)\n canvas_set(image)\n \ndef imgreverse():\n global image\n image=Filtre(image).reverse()\n canvas_set(image)\n\n\n\ndef mousemove(event):\n cursorposition.config(text=\"X: \"+str(event.x)+\" Y: \"+str(event.y))\n\ndef mousemoveclick(event):\n global x1\n global y1\n global x2\n global y2\n \n if(command == \"Draw\"):\n xl,y1=(event.x -1), (event.y -1) \n x2,y2=(event.x+1), (event.y+1) \n draw.rectangle([(xl,y1),(x2,y2)], fill=color, outline=color)\n canvas_set(image)\n \n if(command ==\"Select\"):\n if(x1==0):\n x1 = event.x\n y1 = event.y\n else:\n x2 = event.x\n y2 = event.y\n canvas.delete('no')\n dashes = [3, 2]\n canvas.create_rectangle(x1,y1,x2,y2,dash=dashes,outline=\"white\",tags='no')\n canvas.create_rectangle(x1+1,y1+1,x2+1,y2+1,dash=dashes,outline=\"black\",tags='no')\n \n \ndef mouseclick(event):\n global x1\n global y1\n global x2\n global y2\n global image\n \n if(command==\"Text\"):\n x1 = event.x\n 
y1 = event.y\n root = Tk()\n root.title(\"Insert Text\")\n label = Label(root, fg=\"green\",text=\"Insert Text\")\n label.pack()\n text = Entry(root,text=\"Hello\")\n text.pack()\n def set_text():\n global image\n draw = ImageDraw.Draw(image)\n draw.text((x1,y1), text.get(), fill=color)\n canvas_set(image)\n root.destroy()\n \n button = Button(root, text='OK', width=25, command=set_text)\n button.pack()\n root.mainloop()\n\ndef paint(): \n global command\n command =\"Draw\"\n canvas.config(cursor=\"pencil\")\n commandlabel.config(text=\"Command: \"+ command)\n \ndef select():\n global command\n command =\"Select\"\n commandlabel.config(text=\"Command: \"+ command)\n canvas.config(cursor=\"tcross\")\n coords_reset()\n \ndef errase():\n global color\n color = \"white\"\n \ndef askingcolor():\n global color\n (rgb, hx) = colorchooser.askcolor()\n color = hx\n print((rgb, hx))\n \ndef imgcrop():\n global x1\n global y1\n global x2\n global y2\n global image\n command = \"Crop\"\n commandlabel.config(text=\"Command: \"+ command)\n image = image.crop((x1, y1, x2, y2))\n canvas_set(image)\n coords_reset()\n\n\ndef imgtext():\n global command\n command =\"Text\"\n canvas.config(cursor=\"xterm\")\n commandlabel.config(text=\"Command: \"+ command)\n \n \n \ndef reset():\n global filepath\n global image\n global draw\n image = Image.open(filepath)\n draw = ImageDraw.Draw(image)\n photo = ImageTk.PhotoImage(image)\n canvas_set(image)\n \ndef mirror():\n global image\n image=Filtre(image).mirror()\n canvas_set(image)\n \ndef rotation():\n global image\n image=Filtre(image).rotation()\n canvas_set(image)\n \ndef cut_red():\n global image\n image=Filtre(image).cut_color(\"red\")\n canvas_set(image)\n\ndef cut_green():\n global image\n image=Filtre(image).cut_color(\"green\")\n canvas_set(image)\n \ndef cut_blue():\n global image\n image=Filtre(image).cut_color(\"blue\")\n canvas_set(image)\n\ndef redplus():\n global image\n image=Filtre(image).red_plus(+10)\n canvas_set(image)\n\ndef redmoins():\n global image\n image=Filtre(image).red_plus(-10)\n canvas_set(image)\n \ndef greenplus():\n global image\n image=Filtre(image).green_plus(+10)\n canvas_set(image)\n\ndef greenmoins():\n global image\n image=Filtre(image).green_plus(-10)\n canvas_set(image)\n \ndef blueplus():\n global image\n image=Filtre(image).blue_plus(+10)\n canvas_set(image)\n\ndef bluemoins():\n global image\n image=Filtre(image).blue_plus(-10)\n canvas_set(image)\n \ndef rgb_form():\n root = Tk()\n root.minsize(300,100)\n redButtonPlus = Button(root,text=\"R+\", command=redplus)\n redButtonPlus.grid(row = 0, column = 0 , sticky = W, pady = 2) \n redButtonMoins = Button(root,text=\"R-\", command=redmoins)\n redButtonMoins.grid(row = 0, column = 1, sticky = E, pady = 2) \n greenButtonPlus = Button(root,text=\"G+\", command=greenplus)\n greenButtonPlus.grid(row = 1 ,column = 0, sticky = W, pady = 2) \n greenButtonMoins = Button(root,text=\"G-\", command=greenmoins)\n greenButtonMoins.grid(row = 1, column = 1, sticky = E, pady = 2) \n blueButtonPlus = Button(root,text=\"B+\", command=blueplus)\n blueButtonPlus.grid(row = 2, column = 0, sticky = W, pady = 2) \n greenButtonMoins = Button(root,text=\"B-\", command=bluemoins)\n greenButtonMoins.grid(row = 2, column = 1, sticky = E, pady = 2) \n root.mainloop()\n \nimage = Image.new(\"RGB\", (500, 400), \"white\")\ndraw = ImageDraw.Draw(image)\n\nfenetre = Tk()\nfenetre.title(\"Image Processor 1.0\")\nfenetre.state(\"zoomed\")\n# tool bar\ntoolbar = Frame(fenetre, bd=1, 
relief=RAISED)\ntoolbar.grid(row = 0, column = 1, sticky = W, pady = 2) \n\ntoolbar2 = Frame(fenetre, width=50, height=600)\ntoolbar2.grid(row = 2, column = 0)\n\nrightframe = Frame(fenetre, width=300, height=600, background=\"black\")\nrightframe.grid(row = 2, column = 1)\n\n# canvas\ncanvas = Canvas(rightframe, width=500, height= 400 , bg=\"white\")\ncanvas.grid(row = 2, column = 0, pady = 20, padx = 20)\ncanvas.config(cursor=\"pencil\")\ncanvas.bind('<B1-Motion>',mousemoveclick)\ncanvas.bind('<Motion>',mousemove)\ncanvas.bind('<Button-1>',mouseclick)\n\n# canvas 2\ncanvas2 = Canvas(rightframe, width=500, height= 400 , bg=\"white\")\ncanvas2.grid(row = 2, column = 1, pady = 20, padx = 20)\n\nstatutbar = Frame(fenetre, width=300, height=600 )\nstatutbar.grid(row = 3, column = 1)\n\nsizelabel = Label(statutbar, text= \"Width: 500 Height: 400\")\nsizelabel.pack(side=RIGHT)\n\ncursorposition = Label(statutbar, text= \"X: 0 Y: 0\")\ncursorposition.pack(side=RIGHT)\n\ncommandlabel = Label(statutbar, text= \"Command: Draw\")\ncommandlabel.pack(side=RIGHT)\n\nimg1 = ImageTk.PhotoImage(Image.open(\"icons/exit.png\"))\nexitButton = Button(toolbar, image=img1, relief=FLAT,command=fenetre.destroy)\nexitButton.pack(side=RIGHT, padx=2, pady=2)\n\nlf_img0 = ImageTk.PhotoImage(Image.open(\"icons/reset.png\"))\nresetButton = Button(toolbar, image=lf_img0, relief=FLAT,command=reset)\nresetButton.pack(side=RIGHT, padx=2, pady=2)\n\nlf_img55 = ImageTk.PhotoImage(Image.open(\"icons/cut_blue.png\"))\nresetButton = Button(toolbar, image=lf_img55, relief=FLAT,command=cut_blue)\nresetButton.pack(side=RIGHT, padx=2, pady=2)\n\nlf_img56 = ImageTk.PhotoImage(Image.open(\"icons/cut_green.png\"))\nresetButton = Button(toolbar, image=lf_img56, relief=FLAT,command=cut_green)\nresetButton.pack(side=RIGHT, padx=2, pady=2)\n\nlf_img57 = ImageTk.PhotoImage(Image.open(\"icons/cut_red.png\"))\nresetButton = Button(toolbar, image=lf_img57, relief=FLAT,command=cut_red)\nresetButton.pack(side=RIGHT, padx=2, pady=2)\n\nrgbButton = Button(toolbar, text=\"RGB\", relief=FLAT,command=rgb_form)\nrgbButton.pack(side=RIGHT, padx=2, pady=2)\n\n\nlf_img00 = ImageTk.PhotoImage(Image.open(\"icons/mirror.png\"))\nresetButton = Button(toolbar, image=lf_img00, relief=FLAT,command=mirror)\nresetButton.pack(side=RIGHT, padx=2, pady=2)\n\nlf_img01 = ImageTk.PhotoImage(Image.open(\"icons/rotation.png\"))\nresetButton = Button(toolbar, image=lf_img01, relief=FLAT,command=rotation)\nresetButton.pack(side=RIGHT, padx=2, pady=2)\n\nlf_img5a5 = ImageTk.PhotoImage(Image.open(\"icons/select.png\"))\nselectButton = Button(toolbar2, image=lf_img5a5, relief=FLAT, command=select)\nselectButton.pack( padx=2, pady=2)\n\nlf_img1 = ImageTk.PhotoImage(Image.open(\"icons/pencil.png\"))\nexitButton = Button(toolbar2, image=lf_img1, relief=FLAT, command=paint)\nexitButton.pack( padx=2, pady=2)\n\nlf_img2 = ImageTk.PhotoImage(Image.open(\"icons/rubber.png\"))\nexitButton = Button(toolbar2, image=lf_img2, relief=FLAT,command=errase)\nexitButton.pack( padx=2, pady=2)\n\nlf_img3 = ImageTk.PhotoImage(Image.open(\"icons/color.jpg\"))\ncolorButton = Button(toolbar2, fg='red',image=lf_img3, relief=FLAT,command=askingcolor)\ncolorButton.pack( padx=2, pady=2)\n\nlf_img4 = ImageTk.PhotoImage(Image.open(\"icons/crop.png\"))\ncolorButton = Button(toolbar2, fg='red',image=lf_img4, relief=FLAT,command=imgcrop)\ncolorButton.pack( padx=2, pady=2)\n\nlf_img5 = ImageTk.PhotoImage(Image.open(\"icons/text.png\"))\ncolorButton = Button(toolbar2, fg='red',image=lf_img5, 
relief=FLAT,command=imgtext)\ncolorButton.pack(side=RIGHT, padx=2, pady=2)\n\nimg223 = ImageTk.PhotoImage(Image.open(\"icons/inverse.png\"))\ninvButton = Button(toolbar, image=img223, relief=FLAT,command=imgreverse)\ninvButton.pack(side=RIGHT, padx=2, pady=2)\n\nimg222 = ImageTk.PhotoImage(Image.open(\"icons/graylevel.png\"))\nbgrButton = Button(toolbar, image=img222, relief=FLAT,command=imggraylevel)\nbgrButton.pack(side=RIGHT, padx=2, pady=2)\n\nimg22 = ImageTk.PhotoImage(Image.open(\"icons/blackwhite.png\"))\nbwButton = Button(toolbar, image=img22, relief=FLAT,command=imgbin)\nbwButton.pack(side=RIGHT, padx=2, pady=2)\n\nimg2 = ImageTk.PhotoImage(Image.open(\"icons/brightness_plus.png\"))\nbpButton = Button(toolbar, image=img2, relief=FLAT,command=imgbrightnessplus)\nbpButton.pack(side=RIGHT, padx=2, pady=2)\n\nimg3 = ImageTk.PhotoImage(Image.open(\"icons/brightness_moins.png\"))\nbmButton = Button(toolbar, image=img3, relief=FLAT, command=imgbrightnessmoins)\nbmButton.pack(side=RIGHT, padx=2, pady=2)\n\nimg4 = ImageTk.PhotoImage(Image.open(\"icons/save.png\"))\nsButton = Button(toolbar, image=img4, relief=FLAT, command=saveimgas)\nsButton.pack(side=RIGHT, padx=2, pady=2)\n\nimg5 = ImageTk.PhotoImage(Image.open(\"icons/open.png\"))\nOButton = Button(toolbar, image=img5, relief=FLAT, command=openimg)\nOButton.pack(side=RIGHT, padx=2, pady=2)\n\nimg77 = ImageTk.PhotoImage(Image.open(\"icons/new.png\"))\nOButton = Button(toolbar, image=img77, relief=FLAT, command=newimg)\nOButton.pack(side=RIGHT, padx=2, pady=2)\n \nmenubar = Menu(fenetre)\n\nmenu1 = Menu(menubar, tearoff=0)\nmenu1.add_command(label=\"Ouvrir\", command=openimg)\nmenu1.add_command(label=\"Enregistrer\", command=about)\nmenu1.add_command(label=\"Enregistrer sous\", command=saveimgas)\nmenu1.add_separator()\nmenu1.add_command(label=\"Quitter\", command=fenetre.destroy)\nmenubar.add_cascade(label=\"Fichier\", menu=menu1)\n\nmenu2 = Menu(menubar, tearoff=0)\nmenu2.add_command(label=\"Couper\", command=about)\nmenu2.add_command(label=\"Copier\", command=about)\nmenu2.add_command(label=\"Coller\", command=about)\nmenubar.add_cascade(label=\"Editer\", menu=menu2)\n\nmenu4 = Menu(menubar, tearoff=0)\nmenu4.add_command(label=\"Black and white\", command=imgbin)\nmenu4.add_command(label=\"Gray level\", command=imggraylevel)\nmenubar.add_cascade(label=\"Filtre\", menu=menu4)\n\nmenu3 = Menu(menubar, tearoff=0)\nmenu3.add_command(label=\"A propos\", command=about)\nmenubar.add_cascade(label=\"Aide\", menu=menu3)\n\nfenetre.config(menu=menubar)\n\n\nfenetre.mainloop()" }, { "alpha_fraction": 0.4290410876274109, "alphanum_fraction": 0.4482191801071167, "avg_line_length": 30.64601707458496, "blob_id": "81d5b93c3e52584c2b39bc8f1988fea19a7d5c5e", "content_id": "385bd8d03ffb7517e6af98d825e9ff214e7e4bb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3650, "license_type": "no_license", "max_line_length": 59, "num_lines": 113, "path": "/filtres.py", "repo_name": "OuchattiKhadija/ImageProcessor", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 24 23:46:15 2020\n\n@author: imran\n\"\"\"\nfrom PIL import Image, ImageTk, ImageDraw\n\nclass Filtre:\n def __init__(self,img):\n self.img = img\n \n def binary(self,x1,y1,x2,y2):\n largeur , hauteur=self.img.size \n for y in range(x1,y1):\n for x in range(x1,y2):\n p=self.img.getpixel((x,y))\n avg = (p[0]+p[1]+p[2])/3\n if avg>=128:\n avg=256\n else:\n avg=0\n q=(avg,avg,avg)\n self.img.putpixel((x,y),q)\n 
\n return self.img\n \n def graylevel(self):\n largeur , hauteur=self.img.size \n for y in range(hauteur):\n for x in range(largeur):\n p=self.img.getpixel((x,y))\n avg = int((p[0]+p[1]+p[2])/3)\n q=(avg,avg,avg)\n self.img.putpixel((x,y),q)\n return self.img\n \n def brightness(self,value):\n largeur , hauteur=self.img.size \n for y in range(hauteur):\n for x in range(largeur):\n p=self.img.getpixel((x,y))\n q=(p[0]+value,p[1]+value,p[2]+value)\n self.img.putpixel((x,y),q)\n return self.img\n \n def reverse(self):\n largeur , hauteur=self.img.size \n for y in range(hauteur):\n for x in range(largeur):\n p=self.img.getpixel((x,y))\n q=(255-p[0],255-p[1],255-p[2])\n self.img.putpixel((x,y),q)\n return self.img\n \n def mirror(self):\n largeur , hauteur=self.img.size\n imgr = Image.new('RGB',(largeur,hauteur)) \n for y in range(hauteur):\n for x in range(largeur):\n p=self.img.getpixel((x,y))\n imgr.putpixel((largeur-x-1,y),p)\n return imgr\n \n def rotation(self):\n largeur , hauteur=self.img.size\n imgr = Image.new('RGB',(hauteur,largeur)) \n for y in range(hauteur):\n for x in range(largeur):\n p=self.img.getpixel((x,y))\n imgr.putpixel((y,x),p)\n return imgr\n \n def cut_color(self,color):\n largeur , hauteur=self.img.size\n for y in range(hauteur):\n for x in range(largeur):\n p=self.img.getpixel((x,y))\n if(color==\"red\"):\n q=(0,p[1],p[2])\n if(color==\"green\"):\n q=(p[0],0,p[2])\n if(color==\"blue\"):\n q=(p[0],p[1],0)\n self.img.putpixel((x,y),q)\n return self.img\n \n def red_plus(self,value):\n largeur , hauteur=self.img.size\n for y in range(hauteur):\n for x in range(largeur):\n p=self.img.getpixel((x,y))\n q=(p[0]+value,p[1],p[2])\n self.img.putpixel((x,y),q)\n return self.img\n \n def green_plus(self,value):\n largeur , hauteur=self.img.size\n for y in range(hauteur):\n for x in range(largeur):\n p=self.img.getpixel((x,y))\n q=(p[0],p[1]+value,p[2])\n self.img.putpixel((x,y),q)\n return self.img\n \n def blue_plus(self,value):\n largeur , hauteur=self.img.size\n for y in range(hauteur):\n for x in range(largeur):\n p=self.img.getpixel((x,y))\n q=(p[0],p[1],p[2]+value)\n self.img.putpixel((x,y),q)\n return self.img\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n " } ]
2
remko66/stock_predictions
https://github.com/remko66/stock_predictions
3d39632e2348554572df9f0e6f958fe79bd4d1d2
5f81725f8a858e9cfff2ad1ac0f4b3c385cb68dc
577dfc4b06ee6ea0e82c07514531c53743c3217a
refs/heads/master
2020-07-18T14:21:38.337928
2019-09-04T07:48:07
2019-09-04T07:48:07
206,262,136
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7166203856468201, "alphanum_fraction": 0.7382053732872009, "avg_line_length": 28.76146697998047, "blob_id": "2a5d3ee76fd49a0151ae28226a113e4213f34dbd", "content_id": "2047000e04739add50f0bef9405345820fbfc14f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3243, "license_type": "no_license", "max_line_length": 103, "num_lines": 109, "path": "/stocks.py", "repo_name": "remko66/stock_predictions", "src_encoding": "UTF-8", "text": "import math\nimport numpy as np\nimport pandas as pd\nimport datetime\nimport matplotlib.pyplot as plt\n\nimport pandas_datareader.data as web\nfrom pandas import Series, DataFrame\nimport matplotlib as mpl\nfrom sklearn import preprocessing\nfrom sklearn.linear_model import LinearRegression,Lasso,BayesianRidge\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsRegressor\n\nfrom sklearn.linear_model import Ridge\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.pipeline import make_pipeline\n\nstart = datetime.datetime(2010, 1, 1)\nend = datetime.datetime(2019, 2, 9)\n\ndf = web.DataReader(\"AAPL\", 'yahoo', start, end)\nprint(df.tail())\n\n\nclose_px = df['Adj Close']\nmavg = close_px.rolling(window=100).mean()\ndfreg = df.loc[:,['Adj Close','Volume']]\n\ndfreg['HL_PCT'] = (df['High'] -df['Low']) / df['Close'] * 100.0\ndfreg['PCT_change'] = (df['Close'] - df['Open']) / df['Open'] * 100.0\n\n\n\n# Drop missing value\ndfreg.fillna(value=-99999, inplace=True)\n# We want to separate 1 percent of the data to forecast\nforecast_out = int(math.ceil(0.01 * len(dfreg)))\n# Separating the label here, we want to predict the AdjClose\nforecast_col = 'Adj Close'\ndfreg['label'] = dfreg[forecast_col].shift(-forecast_out)\nX = np.array(dfreg.drop(['label'], 1))\n# Scale the X so that everyone can have the same distribution for linear regression\nX = preprocessing.scale(X)\n# Finally We want to find Data Series of late X and early X (train) for model generation and evaluation\nX_lately = X[-forecast_out:]\nX = X[:-forecast_out]\n# Separate label and identify it as y\ny = np.array(dfreg['label'])\ny = y[:-forecast_out]\nX_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.1, random_state=42)\n\n# Linear regression\nclfreg = LinearRegression(n_jobs=-1)\nclfreg.fit(X_train, y_train)\n# Quadratic Regression 2\nclfpoly2 = make_pipeline(PolynomialFeatures(2), Ridge())\nclfpoly2.fit(X_train, y_train)\n\n# Quadratic Regression 3\nclfpoly3 = make_pipeline(PolynomialFeatures(3), Ridge())\nclfpoly3.fit(X_train, y_train)\nclfknn = KNeighborsRegressor(n_neighbors=2)\nclfknn.fit(X_train, y_train)\n\nclflasso=Lasso()\nclflasso.fit(X_train,y_train)\n\nclfbayes=BayesianRidge()\nclfbayes.fit(X_train,y_train)\n\nconfidencereg = clfreg.score(X_test, y_test)\nconfidencepoly2 = clfpoly2.score(X_test,y_test)\nconfidencepoly3 = clfpoly3.score(X_test,y_test)\nconfidenceknn = clfknn.score(X_test, y_test)\nconfidencebayes=clfbayes.score(X_test,y_test)\nconfidencelasso=clflasso.score(X_test,y_test)\n\nprint(confidencebayes,confidencelasso)\n\nforecast_set = clfpoly3.predict(X_lately)\ndfreg['Poly3'] = None\ndfreg['Poly3'][-1*len(forecast_set):]=forecast_set\n\nforecast_set = clfknn.predict(X_lately)\ndfreg['knn'] = None\ndfreg['knn'][-1*len(forecast_set):]=forecast_set\n\nforecast_set = clfbayes.predict(X_lately)\ndfreg['Bayes'] = None\ndfreg['Bayes'][-1*len(forecast_set):]=forecast_set\n\n\nforecast_set = 
clflasso.predict(X_lately)\ndfreg['Lasso'] = None\ndfreg['Lasso'][-1*len(forecast_set):]=forecast_set\n\n\ndfreg['Adj Close'].tail(30).plot()\ndfreg['Poly3'].tail(30).plot()\ndfreg['Bayes'].tail(30).plot()\ndfreg['Lasso'].tail(30).plot()\ndfreg['knn'].tail(30).plot()\n\nplt.legend(loc=0)\nplt.xlabel('Date')\nplt.ylabel('Price')\nprint(dfreg)\nplt.show()" } ]
1
LeonWolber/Heart-Failure-Research
https://github.com/LeonWolber/Heart-Failure-Research
c45dc5f550c211566d46cdcf4ccc70c7f6e28d0d
db0d5a98d6b742c966c3ed52be840b9e4efb83aa
afdf206d73f939bf0477ce0c5a8fe518a5cd8c2d
refs/heads/master
2022-11-11T09:58:19.347177
2021-05-08T18:10:14
2021-05-08T18:10:14
273,791,682
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6665568947792053, "alphanum_fraction": 0.67303466796875, "avg_line_length": 31.992536544799805, "blob_id": "8fc37a0db078646ee20fd3b08074c64f162d8a12", "content_id": "845865e056b1abfa1d111f188d16b00072d78417", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9108, "license_type": "no_license", "max_line_length": 196, "num_lines": 268, "path": "/NLP_Heart_Failure.py", "repo_name": "LeonWolber/Heart-Failure-Research", "src_encoding": "UTF-8", "text": "import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport os\r\nimport re\r\nimport string\r\n\r\nimport nltk\r\nfrom nltk.probability import FreqDist\r\nfrom nltk.stem import WordNetLemmatizer\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.stem import SnowballStemmer\r\nfrom nltk import pos_tag\r\nfrom nltk.stem import WordNetLemmatizer\r\nfrom nltk.tokenize import word_tokenize\r\n\r\nfrom wordcloud import WordCloud\r\nfrom tqdm import tqdm\r\nimport matplotlib.style as style\r\nstyle.use('fivethirtyeight')\r\nfrom sklearn.metrics import plot_roc_curve\r\nfrom numpy import interp\r\nfrom itertools import cycle\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.metrics import roc_curve, auc\r\n\r\nfrom numpy import array\r\nfrom keras.preprocessing.text import one_hot\r\nfrom keras.preprocessing.sequence import pad_sequences\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.layers import Flatten\r\nfrom keras.layers.embeddings import Embedding\r\nfrom keras.layers import LSTM, Bidirectional\r\nfrom keras import optimizers\r\nfrom keras.callbacks import ModelCheckpoint\r\nfrom keras.callbacks import EarlyStopping\r\nfrom keras.layers import Layer\r\n\r\nfrom collections import Counter\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.model_selection import StratifiedKFolD\r\nfrom sklearn.metrics import confusion_matrix, classification_report\r\n\r\n\r\ndef read_data():\r\n # load data\r\n df = pd.read_csv('subset_documents.cvs', sep = '\\t')\r\n # create binary label column based on date of death \r\n df['death_outcome'] = np.where(df['Datum van overlijden'].isna() == True , 0, 1)\r\n df['death_outcome'] = pd.Categorical(df['death_outcome'])\r\n \r\n # fill NaN to empty string\r\n df = df.replace(np.nan, '', regex=True) \r\n\r\n\r\n\r\ndef remove_line_breaks(text):\r\n text = text.replace('\\r', ' ').replace('\\n', ' ')\r\n return text\r\n\r\ndef remove_special_characters(text):\r\n text = re.sub('[^a-zA-z0-9\\s]', '', text)\r\n return text\r\n\r\ndef lowercase(text):\r\n text_low = [token.lower() for token in word_tokenize(text)]\r\n return ' '.join(text_low)\r\n\r\ndef remove_stopwords(text):\r\n stop = set(stopwords.words('dutch'))\r\n word_tokens = nltk.word_tokenize(text)\r\n text = \" \".join([word for word in word_tokens if word not in stop])\r\n return text\r\n\r\n#remove punctuation\r\ndef remove_punctuation(text):\r\n re_replacements = re.compile(\"__[A-Z]+__\") # such as __NAME__, __LINK__\r\n re_punctuation = re.compile(\"[%s]\" % re.escape(string.punctuation))\r\n '''Escape all the characters in pattern except ASCII letters and numbers: word_tokenize('ebrahim^hazrati')'''\r\n tokens = word_tokenize(text)\r\n tokens_zero_punctuation = []\r\n for token in tokens:\r\n if not re_replacements.match(token):\r\n token = re_punctuation.sub(\" \", token)\r\n tokens_zero_punctuation.append(token)\r\n return ' 
'.join(tokens_zero_punctuation)\r\n\r\n#remobe one character words\r\ndef remove_one_character_words(text):\r\n '''Remove words from dataset that contain only 1 character'''\r\n text_high_use = [token for token in word_tokenize(text) if len(token)>1] \r\n return ' '.join(text_high_use) \r\n\r\n##remove specific word list\r\ndef remove_special_words(text):\r\n '''Remove the User predefine useless words from the text. The list should be in the lowercase.'''\r\n special_words_list=['af', 'iv', 'ivm', 'mg', 'dd', 'vrijdag','afspraak','over','met', 'van', 'patient', 'dr', 'geyik','heyman','bekker','dries','om', 'sel', 'stipdonk', 'eurling', 'knackstedt'\r\n 'lencer','volder','schalla']# list : words\r\n querywords=text.split()\r\n textwords = [word for word in querywords if word.lower() not in special_words_list]\r\n text=' '.join(textwords)\r\n return text\r\n \r\n#%%\r\n# Stemming with 'Snowball Dutch stemmer\" package\r\ndef stem(text):\r\n stemmer = nltk.stem.snowball.SnowballStemmer('dutch')\r\n text_stemmed = [stemmer.stem(token) for token in word_tokenize(text)] \r\n return ' '.join(text_stemmed)\r\n\r\ndef lemma(text):\r\n wordnet_lemmatizer = WordNetLemmatizer()\r\n word_tokens = nltk.word_tokenize(text)\r\n text_lemma = \" \".join([wordnet_lemmatizer.lemmatize(word) for word in word_tokens]) \r\n return ' '.join(text_lemma)\r\n\r\n\r\n#break sentences to individual word list\r\ndef sentence_word(text):\r\n word_tokens = nltk.word_tokenize(text)\r\n return word_tokens\r\n#break paragraphs to sentence token \r\ndef paragraph_sentence(text):\r\n sent_token = nltk.sent_tokenize(text)\r\n return sent_token \r\n\r\n\r\ndef tokenize(text):\r\n \"\"\"Return a list of words in a text.\"\"\"\r\n return re.findall(r'\\w+', text)\r\n\r\ndef remove_numbers(text):\r\n no_nums = re.sub(r'\\d+', '', text)\r\n return ''.join(no_nums)\r\n\r\n\r\n\r\ndef normalization_pitchdecks(text):\r\n _steps = [\r\n remove_line_breaks,\r\n remove_one_character_words,\r\n remove_special_characters,\r\n lowercase,\r\n remove_punctuation,\r\n remove_stopwords,\r\n remove_special_words,\r\n stem,\r\n remove_numbers\r\n]\r\n for step in _steps:\r\n text=step(text)\r\n return text \r\n\r\n\r\n\r\ndef clean_text(df):\r\n # apply all cleaning ufnctions on each of the 4 text columns\r\n # we don't join all text before cleaning, this allows to only train on certain text columns and disregarding others\r\n text_columns = ['Tekst1','Tekst2','Tekst3','Tekst4']\r\n \r\n for column in text_columns:\r\n df[column] = [normalization_pitchdecks(txt) for txt in df[column]]\r\n \r\n \r\n # create column of joined text\r\n df[\"joined_text\"] = df[\"Tekst1\"] +\" \"+ df[\"Tekst2\"] +\" \"+ df['Tekst3'] +\" \"+ df['Tekst4']\r\n return df\r\n\r\n\r\ndef prepare_data(df):\r\n # Group by 'Patientnr' and 'label', and join together the different text fields for every patient\r\n combined = df.groupby(['Patnr', 'death_outcome'])['joined_text'].apply(lambda x: ' '.join(x.astype(str))).reset_index()\r\n \r\n # full text (per patient bc. of sequential data) to train model\r\n docs = combined['joined_text'].tolist()\r\n \r\n # label\r\n labels = combined['death_outcome'].values\r\n \r\n \r\n # check number of unique words to estimate a reasonable vocab size\r\n one_str = ''.join(docs)\r\n amount_unique_words = Counter(one_str.split())\r\n print(f'No. 
of unique words used in corpus: {amount_unique_words}') \r\n \r\n return combined, docs, labels, amount_unique_words\r\n \r\n\r\ndef encode_documents(docs, word_amount):\r\n # translate words into one hot vectors according to corpus size of unique words\r\n # represent words as their position in the resulting sparse matrix\r\n # patient -> [435] instead of [0,0,0, ... 1,0,0,0] (1 is at pos. 435)\r\n encoded_docs = [one_hot(d, word_amount) for d in docs]\r\n return encoded_docs\r\n\r\n\r\ndef max_doc_length(encoded_docs):\r\n\t# find maximum number of words used in a single document\r\n return max([len(i) for i in encoded_docs])\r\n\r\n\r\ndef pad_docs(enc_docs, max_length):\r\n\t# pad all documents to standardize length - important for embedding layer\r\n padded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')\r\n return padded_docs\r\n\r\n\r\n\r\n\r\ndef build_lstm(max_lenght, vocabe_size):\r\n model = Sequential()\r\n model.add(Embedding(input_dim = vocab_size, output_dim = 32, input_length = max_length))\r\n model.add(Bidirectional(LSTM(128, activation='linear',return_sequences=True)))\r\n model.add(Bidirectional(LSTM(64, activation='linear')))\r\n model.add(Dense(32, activation='linear'))\r\n model.add(Dense(1, activation='sigmoid'))\r\n\r\n # compile the model\r\n sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\r\n\r\n model.compile(optimizer=sgd, loss='binary_crossentropy', metrics=['accuracy'])\r\n print(model.summary())\r\n return model\r\n\r\n\r\n\r\ndef create_partitions(padded_docs, labels):\r\n X = padded_docs\r\n y = labels\r\n \r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\r\n return X_train, X_test, y_train, y_test\r\n\r\n\r\ndef performance_check(model, y_test):\r\n vecint = np.vectorize(int)\r\n prediction=vecint((prediction_proba>0.519))\r\n\r\n print(confusion_matrix(y_test,prediction))\r\n print(classification_report(y_test,prediction))\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\tdf = read_data()\r\n\tdf_clean = clean_text(df) \r\n\tcombined_df, docs, labels, vocabe_size = prepare_data(df_clean)\r\n\tenc_docs = encode_documents(docs, vocabe_size)\r\n\tmax_lenght = max_doc_length(enc_docs)\r\n\tpadded_ = pad_docs(enc_docs, max_lenght)\r\n\r\n\tX_train, X_test, y_train, y_test = create_partitions(padded_, labels)\r\n\r\n\tmodel = build_lstm(max_lenght, vocabe_size)\r\n\r\n\r\n\ttensorboad = tf.keras.callbacks.TensorBoard(logdir=f'log/{datetime.datetime.now().strftime('%H-%M-%S')}')\r\n\tes = tf.keras.callbacks.EarlyStopping(monitor='val_loss' patience=5, mode='min')\r\n\tcheck = tf.keras.callbacks.ModelCheckpoint('model_checkpoint', monitor='val_loss')\r\n\t# fit the model\r\n\tmodel.fit(X_train, y_train, epochs=100, verbose=1, validation_data=(X_val, y_val), callbacks=[es, tensorboard, check])\r\n\r\n\r\n\r\n\tperformance_check(model, y_test)" }, { "alpha_fraction": 0.8231707215309143, "alphanum_fraction": 0.8231707215309143, "avg_line_length": 64.4000015258789, "blob_id": "a8ad5cbf51dc690cd467866bc940d635fb909a17", "content_id": "8c86f592e8b9521fac4fa03c9be3525ef93f3626", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 328, "license_type": "no_license", "max_line_length": 139, "num_lines": 5, "path": "/README.md", "repo_name": "LeonWolber/Heart-Failure-Research", "src_encoding": "UTF-8", "text": "## NLP & Deep Learning with electronic health records of Heart Failure Patients\n\n\nIn collaboration with the local 
hospital I work with electronic health records of heart failure patients.\nUsing natural language processing I explore the clinical notes and apply deep learning to extract potential new insights about the disease. \n" } ]
2
AyushTyagi07/Accountiee
https://github.com/AyushTyagi07/Accountiee
0a49fffcd6e2db3defee968891c8864b8ba7097e
9ce0603c450ae4fc4a51ded753a63ca38d9975b2
6f54816058b420e6149a900444900da9f65cb3fd
refs/heads/master
2023-01-22T14:24:04.261244
2020-03-06T09:35:38
2020-03-06T09:35:38
234,856,700
0
0
Apache-2.0
2020-01-19T07:09:50
2020-03-06T09:35:42
2023-01-11T02:46:00
CSS
[ { "alpha_fraction": 0.6572989821434021, "alphanum_fraction": 0.659640908241272, "avg_line_length": 31.049999237060547, "blob_id": "d25e5f18dc97ce8f5eac6eabf05719a5967fea7f", "content_id": "ed85c1fd30cea090723295b1625bd5fc39b85506", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1281, "license_type": "permissive", "max_line_length": 190, "num_lines": 40, "path": "/engine/core.py", "repo_name": "AyushTyagi07/Accountiee", "src_encoding": "UTF-8", "text": "import sqlite3\nimport pandas as pd\nimport sys\n \n\nconn = sqlite3.connect('datast.sqlite3')\nprint(\"Opened database successfully\")\n\ndef insertperson(name,abbr,ptype,pending,contact,gstid):\n conn.execute(\"INSERT INTO Person (Name ,Abbr,Type,Pending,Contact,GSTID) VALUES ('\"+str(name)+\"','\"+str(abbr)+\"','\"+str(ptype)+\"',\"+int(pending)+\",'\"+str(contact)+\"','\"+str(gstid)+\"');\")\n conn.commit()\ndef insertentry(Stype,Amount,Date,pid):\n conn.execute(\"INSERT INTO Transaction (Stype,Amount,Date) VALUES ('\"+str(Stype)+\"',\"+int(Amount)+\",'\"+str(Date)+\"', \"+int(pid)+\");\")\n conn.commit()\n\ndef getallperson():\n df = pd.read_sql_query(\"SELECT * FROM Person ;\", conn)\n return df\n\ndef getalltransactions(pid):\n df = pd.read_sql_query(\"SELECT * FROM Transactions where pid=\"+str(pid)+\" ;\", conn)\n return df\n\ndef getalloptions():\n df = pd.read_sql_query(\"SELECT pid FROM Person ;\", conn)\n return df\n\ndef getallpending():\n df = pd.read_sql_query(\"SELECT pending FROM Person ;\", conn)\n return df\n\ndef getpending(pid):\n df = pd.read_sql_query(\"SELECT pending FROM Person where pid =\"+str(pid)+\" ;\", conn)\n return df\n\ndef getentrycount():\n df = pd.read_sql_query(\"SELECT COUNT() FROM Transactions ;\", conn)\n return df\n\n'''DATABASE Functions END'''" }, { "alpha_fraction": 0.746268630027771, "alphanum_fraction": 0.746268630027771, "avg_line_length": 32.66666793823242, "blob_id": "a58c69886df0b50770d1d623f0b4339efd560ead", "content_id": "aff5cad955e841cc5a29b8b724bfeeffc2d7e307", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 201, "license_type": "permissive", "max_line_length": 56, "num_lines": 6, "path": "/renderer.js", "repo_name": "AyushTyagi07/Accountiee", "src_encoding": "UTF-8", "text": "console.log(\"Renderer\");\nconst printPDFBtn = document.getElementById('print-pdf')\nconsole.log(\"pdf\"+ printPDFBtn);\nprintPDFBtn.addEventListener('click', function (event) {\n ipc.send('print-to-pdf')\n})" }, { "alpha_fraction": 0.5481611490249634, "alphanum_fraction": 0.556917667388916, "avg_line_length": 23.826086044311523, "blob_id": "eb0ed32dad91ccc0238122b3b2e112eef9ef8203", "content_id": "5c51e88cfa0faf7a0018b02d353b5dbf4a52e533", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 571, "license_type": "permissive", "max_line_length": 54, "num_lines": 23, "path": "/selectora.js", "repo_name": "AyushTyagi07/Accountiee", "src_encoding": "UTF-8", "text": "var currentBoxNumber = 0;\nvar textboxes;\nvar nextBox;\n\n(function($) {\n\n \"use strict\";\n\n jQuery('.form-control').keyup(function (event) {\n if (event.keyCode == 13) {\n textboxes = $(\".form-control\");\n currentBoxNumber = textboxes.index(this);\n console.log(textboxes.index(this));\n if (textboxes[currentBoxNumber + 1] != null) {\n nextBox = textboxes[currentBoxNumber + 1];\n nextBox.focus();\n nextBox.select();\n 
event.preventDefault();\n return false;\n }\n }\n});\n})(jQuery);\n" } ]
3
urbennoroac/Coursera-Data-Visualization
https://github.com/urbennoroac/Coursera-Data-Visualization
0327403ce80bcc12c38523cd5ae89ce26f01643e
ddff01c7395b124dbec13f22190ac5e93db7098f
e8eef282e244788ea1043df32c919788dfb9e648
refs/heads/master
2021-05-29T23:36:56.152525
2015-10-28T09:34:11
2015-10-28T09:34:11
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7206477522850037, "alphanum_fraction": 0.7429149746894836, "avg_line_length": 25.7297306060791, "blob_id": "c41b66635cde69c48fd694d56e6c820c45150ac6", "content_id": "97f8f78b77bea726e944d554d62a1db4dd51d6e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 988, "license_type": "no_license", "max_line_length": 87, "num_lines": 37, "path": "/week2_nan.py", "repo_name": "urbennoroac/Coursera-Data-Visualization", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 23 01:02:25 2015\n\n@author: Mind\n\"\"\"\n\n\nimport pandas\nimport numpy\n\n#import data\ndata = pandas.read_csv('gapminder.csv', low_memory=False)\n\n# bug fix for display formats to avoid run time errors\npandas.set_option('display.float_format', lambda x:'%f'%x)\n\n# Covert text string to upper (or lower)\n#data.columns = map(str.upper, data.columns)\n\n# To check numbers of rows and columns\n#print(len(data))\n#print(len(data.columns))\n\n# Before start counting \n# Convert strings to numeric\ndata['internetuserate'] = data['internetuserate'].convert_objects(convert_numeric=True)\ndata['hivrate'] = data['hivrate'].convert_objects(convert_numeric=True)\ndata['suicideper100th'] = data['suicideper100th'].convert_objects(convert_numeric=True)\n\n\nlistinternet = data['internetuserate']\nprint numpy.sum(listinternet.isnull())\nlisthiv = data['hivrate']\nprint numpy.sum(listhiv.isnull())\nlistsuicide = data['suicideper100th']\nprint numpy.sum(listsuicide.isnull())" }, { "alpha_fraction": 0.5802757740020752, "alphanum_fraction": 0.6316896677017212, "avg_line_length": 24.01754379272461, "blob_id": "5c165eb923abe670d4118decfaad3a4f8ba0037f", "content_id": "6e1bd200ef64ba5cd90c5eb241138a936d3c33f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4279, "license_type": "no_license", "max_line_length": 96, "num_lines": 171, "path": "/temp.py", "repo_name": "urbennoroac/Coursera-Data-Visualization", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nimport pandas\n\n#import data\ndata = pandas.read_csv('gapminder.csv', low_memory=False)\n\n# bug fix for display formats to avoid run time errors\npandas.set_option('display.float_format', lambda x:'%f'%x)\n\n# Covert text string to upper (or lower)\n#data.columns = map(str.upper, data.columns)\n\n# To check numbers of rows and columns\n#print(len(data))\n#print(len(data.columns))\n\n# Before start counting \n# Convert strings to numeric\ndata['internetuserate'] = data['internetuserate'].convert_objects(convert_numeric=True)\ndata['hivrate'] = data['hivrate'].convert_objects(convert_numeric=True)\ndata['suicideper100th'] = data['suicideper100th'].convert_objects(convert_numeric=True)\n\n# Divide Internetuserate into 9 groups\ninternetusegroup = []\nfor i,j in enumerate(data['internetuserate']):\n if j < 5:\n internetusegroup.append(\"1\")\n elif j < 15:\n internetusegroup.append(\"2\")\n elif j < 25:\n internetusegroup.append(\"3\")\n elif j < 35:\n internetusegroup.append(\"4\")\n elif j < 45:\n internetusegroup.append(\"5\") \n elif j < 55:\n internetusegroup.append(\"6\")\n elif j < 65:\n internetusegroup.append(\"7\") \n elif j < 75:\n internetusegroup.append(\"8\")\n else:\n internetusegroup.append(\"9\")\n \n#initial counts\ncounter1 = 0\ncounter2 = 0\ncounter3 = 0\ncounter4 = 0\ncounter5 = 0\ncounter6 = 0\ncounter7 = 0\ncounter8 = 
0\ncounter9 = 0\n\n#use traditional counter to count the number for each case\nfor item in internetusegroup:\n if item == \"1\":\n counter1 += 1\n elif item == \"2\":\n counter2 += 1\n elif item == \"3\":\n counter3 += 1\n elif item == \"4\":\n counter4 += 1\n elif item == \"5\":\n counter5 += 1\n elif item == \"6\":\n counter6 += 1\n elif item == \"7\":\n counter7 += 1\n elif item == \"8\":\n counter8 += 1 \n else:\n counter9 += 1\n\ntotal = counter1+counter2+counter3+counter4+counter5+counter6+counter7+counter8+counter9\n\n#To display internetusegroup in table\ncountername = [\"<5%\",\"5-15%\",\"15-25%\",\"25-35%\",\"35-45%\",\"45-55%\",\"55-65%\",\"65-75%\",\">75%\"]\ncounterlist = [counter1,counter2,counter3,counter4,counter5,counter6,counter7,counter8,counter9]\npercentcount = [x/float(total)*100 for x in counterlist]\ndata1 = pandas.DataFrame(countername)\ndata1[\"Count\"] = counterlist\ndata1[\"Percentage\"] = percentcount\nprint data1\n\n###############################################\n\n# Divide hivrate into 3 groups\nhivgroup = []\nfor i,j in enumerate(data['hivrate']):\n if j < 5:\n hivgroup.append(\"1\")\n elif j < 10:\n hivgroup.append(\"2\")\n else:\n hivgroup.append(\"3\")\n\n#initial counts\ncounter1 = 0\ncounter2 = 0\ncounter3 = 0\n\n\n#use traditional counter to count the number for each case\nfor item in hivgroup:\n if item == \"1\":\n counter1 += 1\n elif item == \"2\":\n counter2 += 1 \n else:\n counter3 += 1\n\ntotal = counter1+counter2+counter3\n\n#To display internetusegroup in table\ncountername = [\"HIV <5%\",\"HIV 5-10%\",\"HIV >10&\"]\ncounterlist = [counter1,counter2,counter3]\npercentcount = [x/float(total)*100 for x in counterlist]\ndata2 = pandas.DataFrame(countername)\ndata2[\"Count\"] = counterlist\ndata2[\"Percentage\"] = percentcount\nprint \" \"\nprint data2\n\n\n######################################################################\n\n# Divide suicideper100th into 3 groups\nsuicidegroup = []\nfor i,j in enumerate(data['suicideper100th']):\n if j < 10:\n suicidegroup.append(\"1\")\n elif j < 20:\n suicidegroup.append(\"2\")\n else:\n suicidegroup.append(\"3\")\n\n#initial counts\ncounter1 = 0\ncounter2 = 0\ncounter3 = 0\n\n\n#use traditional counter to count the number for each case\nfor item in suicidegroup:\n if item == \"1\":\n counter1 += 1\n elif item == \"2\":\n counter2 += 1 \n else:\n counter3 += 1\n\ntotal = counter1+counter2+counter3\n\n#To display internetusegroup in table\ncountername = [\"Suicide <10%\",\"Suicide 10-20%\",\"Suicide >20%\"]\ncounterlist = [counter1,counter2,counter3]\npercentcount = [x/float(total)*100 for x in counterlist]\ndata3 = pandas.DataFrame(countername)\ndata3[\"Count\"] = counterlist\ndata3[\"Percentage\"] = percentcount\nprint \" \"\nprint data3\n\n" }, { "alpha_fraction": 0.6552962064743042, "alphanum_fraction": 0.6864153146743774, "avg_line_length": 28.3157901763916, "blob_id": "9534cab2ada26aa56677f245c97cb5d4d28601ac", "content_id": "530c3f715eb3616a45e89e9e0ca685b2afc1bfdd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1673, "license_type": "no_license", "max_line_length": 123, "num_lines": 57, "path": "/wk3_assignment.py", "repo_name": "urbennoroac/Coursera-Data-Visualization", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 28 17:01:20 2015\n\n@author: Mind\n\"\"\"\n\nimport pandas\nimport numpy\n\n#import data\ndata = pandas.read_csv('gapminder.csv', low_memory=False)\n\n# bug fix for display formats to avoid 
run time errors\npandas.set_option('display.float_format', lambda x:'%f'%x)\n\n# Convert strings to numeric\ndata['internetuserate'] = data['internetuserate'].convert_objects(convert_numeric=True)\ndata['hivrate'] = data['hivrate'].convert_objects(convert_numeric=True)\ndata['suicideper100th'] = data['suicideper100th'].convert_objects(convert_numeric=True)\n\n\n\ndata['internetQ']= pandas.qcut(data['internetuserate'],3,labels=['Low','Average','High'])\nc1 = data['internetQ'].value_counts(sort=False, dropna=False)\np1 = data['internetQ'].value_counts(sort=False, dropna=False, normalize=True)\nprint '#### Internet Use Rate ###'\nprint 'Group', ' Counts'\nprint c1\nprint \" \"\nprint 'Group', ' Percentage'\nprint p1\n\n\ndata['hivQ']= pandas.qcut(data['hivrate'],3,labels=['Low','Average','High'])\nc2 = data['hivQ'].value_counts(sort=False, dropna=False)\np2 = data['hivQ'].value_counts(sort=False, dropna=False, normalize=True)\n\nprint \" \"\nprint '#### HIV Rate ###'\nprint 'Group', ' Counts'\nprint c2\nprint \" \"\nprint 'Group', ' Percentage'\nprint p2\n\ndata['suicideQ']= pandas.cut(data['suicideper100th'],[0,10,20,100],labels=['Suicide <10%','Suicide 10–20%','Suicide >20%'])\nc2 = data['suicideQ'].value_counts(sort=False, dropna=False)\np2 = data['suicideQ'].value_counts(sort=False, dropna=False, normalize=True)\n\nprint \" \"\nprint '#### Suicide Rate ###'\nprint 'Group', ' Counts'\nprint c2\nprint \" \"\nprint 'Group', ' Percentage'\nprint p2\n" }, { "alpha_fraction": 0.6612685322761536, "alphanum_fraction": 0.7049033045768738, "avg_line_length": 26.085365295410156, "blob_id": "25975bc04556d49b65ead06e00a0cd5defef65b4", "content_id": "dc8ab6194b343738ce7705da29be3d28cf062d53", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2223, "license_type": "no_license", "max_line_length": 87, "num_lines": 82, "path": "/week3.py", "repo_name": "urbennoroac/Coursera-Data-Visualization", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 28 15:39:11 2015\n\n@author: Mind\n\"\"\"\n\nimport pandas\nimport numpy\n\n#import data\ndata = pandas.read_csv('gapminder.csv', low_memory=False)\n\n# bug fix for display formats to avoid run time errors\npandas.set_option('display.float_format', lambda x:'%f'%x)\n\n# Covert text string to upper (or lower)\n#data.columns = map(str.upper, data.columns)\n\n# To check numbers of rows and columns\n#print(len(data))\n#print(len(data.columns))\n\n# Before start counting \n# Convert strings to numeric\ndata['internetuserate'] = data['internetuserate'].convert_objects(convert_numeric=True)\ndata['hivrate'] = data['hivrate'].convert_objects(convert_numeric=True)\ndata['suicideper100th'] = data['suicideper100th'].convert_objects(convert_numeric=True)\n\n\n# replace group 9 to NaN\ndata['internetuserate'] = data['internetuserate'].replace(9, numpy.nan)\n\n#to include NA\ncs = data['internetuserate'].value_counts(dropna = False, sort = False)\n\n# set 11 as dummy\ndata.loc[data['internetuserate'].isnull(),'internetuserate'] = 11 \n\n# dictionary to recode \nrecode1 = {1:6 ,2:5, 3:4} #old value = new value\ndata['OURFREQ'] = data['internetuserate'].map(recode1)\n\n# to capture more quantitative features\nrecode2 = {1: '30 days', 2: '22 days'}\ndata['OURFREQbyMonth'] = data['internetuserate'].map(recode2)\n\n# for example\ndata['Estimatedaysmoke'] = data['OURFREQbyMonth']*data['OURFREQperday']\n# get first 25 lines\ndata['internetuserate'].head(25)\n\n# always check for errors\nsub3 = 
data[['internetuserate','hivrate','suicideper100th']]\nsub3.head(25)\n\n# to divide into groups using lambda function\ndef ETHNICITY(row):\n if row['NUMETHNIC'] > 1:\n return 1\n if row['H1GI4'] == 1:\n return 2\n \n \ndata['ETHNICITY'] = data.apply(lambda row: ETHNICITY(row), axis =1)\n\nsub2 = data[['AID','H1GI4']]\nprint sub2.head(25)\n\n\n# compare age group :\n# check the freq distr. first\n# if you want to cut into 4 groups cut according to quartiles\n\nsub2['agegroupto4']= pandas.qcut(sub2.AGE,4,labels=['A','B','C','D'])\n\n# customize split\nsub2['agebyourgroup3'] = pandas.cut(sub2.AGE,[17,20,22,25])\n# 18-20, 21-22, 22-25\n\n# cross tab function:\nprint pandas.crosstab(sub2['agebyourgroup3'], sub2['agegroupto4'])\n\n\n" } ]
4
sKannanaikal/Image-Metadata-Checker
https://github.com/sKannanaikal/Image-Metadata-Checker
6f0060ee335bbabafd15225eb749d8007040767e
0c074b2d99ca45e3164d797e62957558a7554a1b
1ed98d25dd9aa37c475a57a6a3de9174db811215
refs/heads/main
2023-06-02T00:14:06.401436
2021-06-20T00:19:09
2021-06-20T00:19:09
378,527,385
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8061224222183228, "alphanum_fraction": 0.8061224222183228, "avg_line_length": 97, "blob_id": "0315dd21fe50f761c050ec5dfe42e41933f9c154", "content_id": "5f208428d25940f6c4f1d3d6ca8647f13c8a56b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 196, "license_type": "no_license", "max_line_length": 170, "num_lines": 2, "path": "/README.md", "repo_name": "sKannanaikal/Image-Metadata-Checker", "src_encoding": "UTF-8", "text": "# Image-Metadata-Checker\nThis python script allows users to enter a url and then it will proceed to attempt to find all the images on said set and identify all available metadata that it may have\n" }, { "alpha_fraction": 0.7139973044395447, "alphanum_fraction": 0.7187079191207886, "avg_line_length": 26.537036895751953, "blob_id": "b15daaff3e639d84dd2fedbde42cb836103c7a85", "content_id": "caa887ccf97762e969dd60fc85a4f486ea132ee5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1486, "license_type": "no_license", "max_line_length": 158, "num_lines": 54, "path": "/script.py", "repo_name": "sKannanaikal/Image-Metadata-Checker", "src_encoding": "UTF-8", "text": "import optparse\nimport urlib2\nfrom bs4 import BeautifulSoup\nfrom urlparse import urlsplit\nfrom os.path import basename\nfrom PIL import Image\n\nNUMOFIMAGES = 0\n\ndef locateImages(url):\n\tprint('[+] Searching {website} for all images'.format(website=url))\n\twebsite = urlib2.urlopen(url).read()\n\tsoup = BeautifulSoup(website)\n\timages = soup.findAll('img')\n\treturn images\n\ndef downloadImage(image):\n\ttry:\n\t\tsource = image['src']\n\t\timageContent = urlib2.urlopen(source).read()\n\t\tNUMOFIMAGES += 1\n\t\timageName = basename(urlsplit(source)[2])\n\t\tlocalCopy = open(imageName, 'wb')\n\t\tlocalCopy.write(imageContent)\n\t\tlocalCopy.close()\n\t\treturn imageName\n\texcept:\n\t\treturn None\n\ndef checkForMetaData(imageName):\n\ttry:\n\t\timage = Image.open(imageName)\n\t\tmetadata = image._getexif()\n\t\tif metadata:\n\t\t\tfor (tag, value) in metadata:\n\t\t\t\tprint(\"[+] {title} : {item}\".format(title=tag, item=value))\n\t\telse:\n\t\t\tprint(\"[-] File Does Not Have Any Metadata Available\")\n\texcept:\n\t\treturn None\n\n\ndef main():\n\tcommand = optparse.OptionParser('usage%prog -u <target url>')\n\tcommand.add_option('-u', dest='target', type='string', help='specify the target url you want to download from')\n\turl = command.target\n\timages = locateImages(url)\n\tfor image in images:\n\t\timageName = downloadImage(image)\n\t\tcheckForMetaData(imageName)\n\tprint('[+] A total of {count} images were found on {website}! They have all been reviewed for inclusion of metadata!'.format(count=NUMOFIMAGES, website=url))\n\nif __name__ == \"__main__\":\n\tmain()" } ]
2
bikashkarmokar/RandomPythonCodes
https://github.com/bikashkarmokar/RandomPythonCodes
13193cce854745e57ddcf3a409a121bc0a631fb5
2adfe4cc43ddd85f7323054fba694bf9ec9356fd
80abc6d95f523da81e073e2d08287ce3276c3d22
refs/heads/master
2020-03-27T10:51:07.382777
2018-08-28T13:09:29
2018-08-28T13:09:29
146,449,120
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6592977643013, "alphanum_fraction": 0.6814044117927551, "avg_line_length": 26.5, "blob_id": "8d9cfd164c7c71c69a68b557436e26a5ed1106f8", "content_id": "f20b1169d173bc9cd1a498fb84510d1092782a9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 769, "license_type": "no_license", "max_line_length": 71, "num_lines": 28, "path": "/ResizeFolderImages.py", "repo_name": "bikashkarmokar/RandomPythonCodes", "src_encoding": "UTF-8", "text": "# Resize all images in a directory to specific size.\n#\n# Have to install \"python-resize-image\" before using\n# pip install python-resize-image\n#\n# If the script is in /images/ and the files are in /images/imagefolder\n# call with: python ResizeFolderImages.py imagefolder\n#\n# Bikash Karmokar, 22/08/2018\n#---------------------------------------------------\n\nfrom PIL import Image\nimport os\nimport sys\n\ndirectory = sys.argv[1]\n\nfor file_name in os.listdir(directory):\n print(\"Processing %s\" % file_name)\n image = Image.open(os.path.join(directory, file_name))\n\n new_dimensions = (227, 227)\n output = image.resize(new_dimensions, Image.ANTIALIAS)\n\n output_file_name = os.path.join(directory,file_name)\n output.save(output_file_name, \"JPEG\", quality = 95)\n\nprint(\"Resize done\")" } ]
1
twissell-/rui
https://github.com/twissell-/rui
235d809a41320fdf7a4d159016a790945d116a9d
3b5949d719bbd93893d6dbb216ed271dff3c6863
d8460395ac521ba9a2c15caac6c7182a4e684c93
refs/heads/master
2023-06-23T06:17:32.608596
2022-06-25T13:13:24
2022-06-25T13:13:24
49,435,696
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5887804627418518, "alphanum_fraction": 0.5897560715675354, "avg_line_length": 26.845069885253906, "blob_id": "b2ec6d723a58b127007c51de014515427da97ec6", "content_id": "55f7a135b568bcb0e7f42c455b0f91394987f343", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2050, "license_type": "no_license", "max_line_length": 111, "num_lines": 71, "path": "/rui/commands/command.py", "repo_name": "twissell-/rui", "src_encoding": "UTF-8", "text": "import logging\r\nimport traceback\r\nfrom abc import (\r\n ABCMeta,\r\n abstractmethod\r\n)\r\n\r\n\r\nclass Command(metaclass=ABCMeta):\r\n \"\"\"Abstact class for every Command to inherit.\"\"\"\r\n\r\n def __init__(self, **kwargs):\r\n super(Command, self).__init__()\r\n self._logger = logging.getLogger('Command.' + type(self).__name__)\r\n\r\n def configureParameters(self, parser):\r\n parser.add_argument('-v', '--verbose', action='store_true', help='Explain what it\\'s being done')\r\n\r\n return parser\r\n\r\n @abstractmethod\r\n def _execute(self, arg_namespace):\r\n raise NotImplementedError\r\n\r\n def execute(self, arg_namespace):\r\n try:\r\n result = self._execute(arg_namespace)\r\n if not issubclass(type(result), CommandOutput):\r\n raise CommandRuntimeException('Commands \"_execute\" method must return \"CommandOutput\" object.')\r\n\r\n if arg_namespace.verbose:\r\n self._formatOutput(result)\r\n\r\n except CommandRuntimeException as err:\r\n self._logger.exception(err)\r\n exit(1)\r\n else:\r\n exit(result.exit_status)\r\n\r\n def _formatOutput(self, output):\r\n print(output.message)\r\n\r\n\r\nclass CommandOutput(object):\r\n \"\"\"Data type for Command execution result.\"\"\"\r\n\r\n def __init__(self, exit_status=0, message=''):\r\n super(CommandOutput, self).__init__()\r\n self._logger = logging.getLogger('Command.' + type(self).__name__)\r\n self._exit_status = exit_status\r\n self._message = message\r\n self._logger.debug('Created: ' + str(self))\r\n\r\n @property\r\n def exit_status(self):\r\n return self._exit_status\r\n\r\n @property\r\n def message(self):\r\n return self._message\r\n\r\n def __str__(self):\r\n return 'Exit Status: {exit_status}. 
Message: {message}'.format(\r\n exit_status=self._exit_status,\r\n message=self._message\r\n )\r\n\r\n\r\nclass CommandRuntimeException(Exception):\r\n \"\"\"Base class for all Tools exceptions.\"\"\"\r\n pass\r\n\r\n" }, { "alpha_fraction": 0.8392857313156128, "alphanum_fraction": 0.8392857313156128, "avg_line_length": 35.33333206176758, "blob_id": "cfe1d5955bdda709027ce819e4b1a4101b7c24db", "content_id": "167a07bc0de6d70247b9444e632a8c68d5fd19cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 112, "license_type": "no_license", "max_line_length": 46, "num_lines": 3, "path": "/rui/commands/__init__.py", "repo_name": "twissell-/rui", "src_encoding": "UTF-8", "text": "from .downloadWatching import DownloadWatching\r\nfrom .report import Report\r\nfrom .clearCache import ClearCache\r\n" }, { "alpha_fraction": 0.6556357145309448, "alphanum_fraction": 0.6593679785728455, "avg_line_length": 36.56074905395508, "blob_id": "7b71f470eac64bc1f85fbe846d2c71c12f7ac1ec", "content_id": "0c53a25ef6ba1fa7a89747732990c421c3db93b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4019, "license_type": "no_license", "max_line_length": 121, "num_lines": 107, "path": "/rui/core/fileManager.py", "repo_name": "twissell-/rui", "src_encoding": "UTF-8", "text": "import os\nimport re\nimport glob\nimport logging\nimport requests\nimport tempfile\nfrom core import config\nfrom core.common import sanitize\nfrom core.common import MediaFormat\n\n\nlogger = logging.getLogger(__name__)\n\ndef getDestinationPath(\n listEntry,\n createIfnotExits=False,\n basePath=config.get('downloads.directory'),\n divideByFormat=config.get('downloads.divideByFormat.enabled')):\n '''Returns the destination (download) path for a given listEntry'''\n\n libdir = ''\n if divideByFormat:\n divideByFormat = config.get('downloads.divideByFormat')\n libdir = divideByFormat.get('movies') if listEntry.format == MediaFormat.MOVIE else divideByFormat.get('others')\n\n rtn = os.path.join(\n basePath,\n libdir,\n config.get('valueOverride.' + str(listEntry.id) + '.directory') or '',\n sanitize(listEntry.title))\n\n if createIfnotExits and not os.path.isdir(rtn):\n os.umask(0)\n os.makedirs(rtn, mode=0o755)\n\n return rtn\n\n\ndef getEpisodePath(listEntry, episodeNumber, destinationPath=None):\n '''If is downloaded, returns de absolute path for the given episodeNumber of a listEntry. 
False otherwise.'''\n pattern = re.compile(r'(\\- *|\\_| |S[0-9]*E)0*%s( |v|_)' % episodeNumber)\n # searchString = (\"%0\" + str(len(str(listEntry.episodes))) + \"d\") % episodeNumber\n # basename = os.path.basename(getDestinationPath(listEntry))\n # for path in glob.glob(os.path.join(getDestinationPath(listEntry), '*', search_string % episodeNumber)):\n\n logger.info('Looking for episode: %s - %d with pattern \"%s\"' % (listEntry.title, episodeNumber, pattern))\n destinationPath = destinationPath or getDestinationPath(listEntry)\n episodeFullPath = None\n for rootDir, dirs, files in os.walk(destinationPath):\n if listEntry.episodes == 1 and len(files) >= 1:\n logger.info('Single episode anime.')\n episodeFullPath = os.path.join(destinationPath, files[0])\n \n return episodeFullPath\n else:\n for filename in files:\n logger.debug('Filename: \"%s\"' % filename)\n if pattern.search(filename):\n logger.debug('Match: \"%s\"' % pattern.search(filename).group(0))\n episodeFullPath = os.path.join(destinationPath, filename)\n logger.info('Episode %s found: \"%s\"' % (episodeNumber, episodeFullPath))\n \n return episodeFullPath\n\n logger.info('Episode %s not found.' % episodeNumber)\n return False\n\n\ndef getMissingEpisodes(listEntry, path=None):\n '''Returns a list of integers with the missing (not downloaded) episodes of a listEntry'''\n missingEpisodes = []\n\n for episode in range(listEntry.firstEpisode, listEntry.lastEpisode + 1):\n if not getEpisodePath(listEntry, episode, path):\n missingEpisodes.append(episode)\n if listEntry.ongoing:\n # if the anime is ongoing, just look for the next missing episode.\n break\n\n return missingEpisodes\n\n\ndef getEpisodes(listEntry, path=None):\n '''Returns a list of paths with the downloaded episodes of a listEntry'''\n episodes = []\n\n for episode in range(listEntry.firstEpisode, listEntry.lastEpisode + 1):\n if getEpisodePath(listEntry, episode, path):\n episodes.append(episode)\n\n return episodes\n\n\ndef downloadFile(url, filename=None):\n '''Downloads the given url into the tmpdir. 
returns de full path on success, False otherwise'''\n tmpdir = config.get('downloads.tmpdir')\n filename = os.path.join(tmpdir, filename) if filename else tempfile.mktemp(prefix='rui-', suffix='.tmp', dir='/tmp/')\n\n try:\n response = requests.get(url, allow_redirects=True)\n with open(filename, 'wb') as file_:\n file_.write(response.content)\n except requests.exceptions.ConnectionError as err:\n logger.error('Error downloading \"%s\":%s' % (filename, err))\n filename = False\n\n return filename\n" }, { "alpha_fraction": 0.5750296115875244, "alphanum_fraction": 0.5768905282020569, "avg_line_length": 25.73755645751953, "blob_id": "5f1ee350dcd16d4e54572c340b1c544acb8ff322", "content_id": "df45f7408317330e1a214b7f21f46ab7e1e69611", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5911, "license_type": "no_license", "max_line_length": 142, "num_lines": 221, "path": "/rui/core/anilist.py", "repo_name": "twissell-/rui", "src_encoding": "UTF-8", "text": "import requests\nimport json\nimport logging\nimport os\nfrom glob import glob\nfrom datetime import datetime\n\nfrom core.common import MediaFormat\nfrom core import config\n\nlogger = logging.getLogger(__name__)\n\nclass MediaListStatus(object):\n CURRENT = 'CURRENT'\n PLANNING = 'PLANNING'\n COMPLETED = 'COMPLETED'\n DROPPED = 'DROPPED'\n PAUSED = 'PAUSED'\n REPEATING = 'REPEATING'\n\n_QUERY = '''\nquery ($username: String, $status: MediaListStatus) {\n MediaListCollection(userName: $username, type: ANIME, status: $status) {\n lists {\n name\n status\n entries {\n score\n media {\n id\n title {\n english\n romaji\n native\n userPreferred\n }\n episodes\n format\n startDate {\n year\n }\n endDate {\n year\n }\n }\n progress\n notes\n }\n }\n }\n}\n'''\n_ENDPOINT = 'https://graphql.anilist.co'\n\ndef getWatchingListByUsername(username):\n return getListByUsernameAndStatus(username, MediaListStatus.CURRENT)\n\n\ndef getCompletedListByUsername(username):\n return getListByUsernameAndStatus(username, MediaListStatus.COMPLETED)\n\n\ndef getListByUsernameAndStatus(username, status):\n\n cache = AnilistCache.getCache(username, status)\n if config.get('cache.enabled') and cache:\n logger.info('Getting watching list from cache.')\n entries = cache\n else:\n response = requests.post(\n _ENDPOINT,\n json = {\n 'query': _QUERY,\n 'variables': {\n 'username': username,\n 'status': status\n }\n }).json()\n entries = response.get('data').get('MediaListCollection').get('lists')[0].get('entries')\n logger.debug('Raw response: ' + json.dumps(entries, indent=2))\n\n if config.get('cache.enabled'):\n AnilistCache.writeCache(username, status, entries)\n\n rtn = []\n for entry in entries:\n rtn.append(ListEntry(entry))\n\n logger.debug('Mapped respose: ' + str(rtn))\n return rtn\n\n\nclass AnilistCache(object):\n\n @staticmethod\n def _getCacheFilePath(username, status):\n return os.path.join(config.get('downloads.tmpdir'), 'rui-%s-%s.cache' % (username, status))\n \n @staticmethod\n def getCache(username, status):\n cachePath = AnilistCache._getCacheFilePath(username, status)\n now = datetime.now().timestamp()\n\n try:\n cacheFile = open(cachePath)\n cache = json.load(cacheFile)\n\n cacheLifetime = (now - cache.get('ts')) / 60\n if cacheLifetime > config.get('cache.expiration'):\n return False\n\n except OSError as err:\n return False\n except json.JSONDecodeError as err:\n return False\n else:\n return cache.get('data')\n\n @staticmethod\n def writeCache(username, status, data):\n cachePath = 
AnilistCache._getCacheFilePath(username, status)\n ts = datetime.now().timestamp()\n\n with open(cachePath, 'w') as cacheFile:\n json.dump({\n 'ts': ts,\n 'data': data\n }, cacheFile)\n logger.info('Cache \"%s\" updated. ts: %f' % (cachePath, ts))\n\n @staticmethod\n def clearCache():\n cachePath = AnilistCache._getCacheFilePath('*', '*')\n\n fileList = glob(cachePath)\n for filePath in fileList:\n os.remove(filePath)\n logger.info(\"Deleted file : %s\" % filePath)\n\n\nclass ListEntry(object):\n def __init__(self, raw_entry):\n super(ListEntry, self).__init__()\n self._id = raw_entry.get('media').get('id')\n self._title = config.get('valueOverride.' + str(self._id) + '.title') or raw_entry.get('media').get('title').get('userPreferred')\n self._english = raw_entry.get('media').get('title').get('english')\n self._romaji = raw_entry.get('media').get('title').get('romaji')\n self._native = raw_entry.get('media').get('title').get('native')\n self._progress = raw_entry.get('progress')\n self._notes = raw_entry.get('notes')\n self._episodes = raw_entry.get('media').get('episodes') or 98\n self._firstEpisode = config.get('valueOverride.' + str(self._id) + '.firstEpisode') or 1\n self._format = MediaFormat.map(raw_entry.get('media').get('format'))\n self._startYear = raw_entry.get('media').get('startDate').get('year')\n self._endYear = raw_entry.get('media').get('endDate').get('year')\n self._score = raw_entry.get('score') or 0\n\n @property\n def id(self):\n return self._id\n\n @property\n def title(self):\n return self._title\n\n @property\n def english(self):\n return self._english\n\n @property\n def romaji(self):\n return self._romaji\n\n @property\n def native(self):\n return self._native\n\n @property\n def progress(self):\n return self._progress\n\n @property\n def notes(self):\n return self._notes\n\n @property\n def episodes(self):\n return self._episodes\n\n @property\n def firstEpisode(self):\n return self._firstEpisode\n\n @property\n def lastEpisode(self):\n return self.firstEpisode + self.episodes - 1\n\n @property\n def year(self):\n return self._startYear\n\n @property\n def format(self):\n return self._format\n\n @property\n def score(self):\n return self._score\n\n @property\n def ongoing(self):\n if self._endYear:\n return False\n else:\n return True\n\n def __repr__(self):\n return '[%d] %s (%d/%d) %s' % (self.id, self.title, self.progress or 0, self.episodes or 0, 'Ongoing' if self.ongoing else 'Finished')\n\n def __lt__(self, other):\n return self.title < other.title\n\n\n" }, { "alpha_fraction": 0.9027777910232544, "alphanum_fraction": 0.9027777910232544, "avg_line_length": 13.399999618530273, "blob_id": "c2c8dbb0c240131b1ba56b61d1ff9907db62e7ee", "content_id": "089786c6c52c872078d839232f76f9e103d4cdb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 72, "license_type": "no_license", "max_line_length": 18, "num_lines": 5, "path": "/requirements.txt", "repo_name": "twissell-/rui", "src_encoding": "UTF-8", "text": "python-Levenshtein\nrequests\ntransmissionrpc\ntabulate\npython-qbittorrent\n" }, { "alpha_fraction": 0.6334991455078125, "alphanum_fraction": 0.6357794404029846, "avg_line_length": 27.210525512695312, "blob_id": "8a8e3fcfc852a09e556132a8fbcac359ae4351d3", "content_id": "420782349fbedb56073c68990533e4d5dc5baa0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4824, "license_type": "no_license", "max_line_length": 157, "num_lines": 171, "path": 
"/rui/core/animebytes.py", "repo_name": "twissell-/rui", "src_encoding": "UTF-8", "text": "import requests\nimport json\nimport logging\nfrom core import config\nfrom core.common import MediaFormat\nfrom pprint import pprint\nfrom core.common import sanitize\n\n\nlogger = logging.getLogger(__name__)\n\n_ENDPOINT = 'https://animebytes.tv/scrape.php?torrent_pass={torrentPass}&format=anime&username={username}&searchstr={searchstr}&filter_cat%5B1%5D&type=anime'\n\n\ndef getTorrentCollectionByAnime(anime):\n \n rtn = []\n if anime.title and not rtn:\n rtn = getTorrentCollectionByTitle(anime.title)\n if anime.romaji and not rtn:\n logger.info('Retrying with romaji title.')\n rtn = getTorrentCollectionByTitle(anime.romaji)\n if anime.native and not rtn:\n logger.info('Retrying with native title.')\n rtn = getTorrentCollectionByTitle(anime.native) \n if anime.english and not rtn:\n logger.info('Retrying with english title.')\n rtn = getTorrentCollectionByTitle(anime.english) \n if anime.romaji and not rtn:\n logger.info('Retrying with sanitize romaji title.')\n rtn = getTorrentCollectionByTitle(sanitize(anime.romaji))\n if anime.english and not rtn:\n logger.info('Retrying with sanitize english title.')\n rtn = getTorrentCollectionByTitle(sanitize(anime.english))\n \n return rtn\n\n\ndef getTorrentCollectionByTitle(title):\n logger.info('Getting torrents list for \"%s\"' % title)\n response = requests.get(_ENDPOINT.format(\n torrentPass=config.get('animebytes.torrentPass'),\n username=config.get('animebytes.username'),\n searchstr=title\n )).json()\n logger.debug('Raw response: ' + json.dumps(response, indent=2))\n\n\n if response.get('Matches') == 0:\n logger.info('No results found.')\n return []\n\n rtn = []\n for anime in response.get('Groups'):\n rtn.append(TorrentCollection(anime))\n\n logger.debug('Mapped respose: ' + str(rtn))\n return rtn\n\n\nclass TorrentCollection(object):\n def __init__(self, raw_collection):\n super(TorrentCollection, self).__init__()\n self._seriesName = raw_collection.get('SeriesName')\n syns = raw_collection.get('Synonymns')\n self._synonymns = syns if isinstance(syns, list) else [value for key, value in syns.items()]\n self._format = MediaFormat.map(raw_collection.get('GroupName'))\n self._year = int(raw_collection.get('Year'))\n links = raw_collection.get('Links')\n self._anidb = links.get('AniDB') if links else None\n self._torrents = []\n\n for torrent in raw_collection.get('Torrents'):\n self._torrents.append(Torrent(torrent))\n\n self._selectorScore = -1000\n\n @property\n def title(self):\n return self._seriesName\n\n @property\n def synonymns(self):\n return self._synonymns\n\n @property\n def format(self):\n return self._format\n\n @property\n def year(self):\n return self._year\n\n @property\n def anidb(self):\n return self._anidb\n\n @property\n def torrents(self):\n return self._torrents\n\n @property\n def score(self):\n return self._selectorScore\n\n @score.setter\n def score(self, value):\n self._selectorScore = value\n\n def __repr__(self):\n return '%s (%s) [%s] - %d torrents' % (self.title, self.format.name, self.year, len(self.torrents))\n\n\n\nclass Torrent(object):\n def __init__(self, raw_torrent):\n super(Torrent, self).__init__()\n self._url = raw_torrent.get('Link')\n self._properties = raw_torrent.get('Property')\n self._rawDownMultiplier = raw_torrent.get('RawDownMultiplier')\n self._rawUpMultiplier = raw_torrent.get('RawUpMultiplier')\n self._seeders = raw_torrent.get('Seeders')\n self._leechers = raw_torrent.get('Leechers')\n\n 
raw_episode = raw_torrent.get('EditionData').get('EditionTitle')\n if not raw_episode:\n self._episode = 0\n else:\n self._episode = int(''.join(i for i in raw_episode if i.isdigit()))\n\n self._selectorScore = 0\n\n\n @property\n def url(self):\n return self._url\n\n @property\n def properties(self):\n return self._properties\n\n @property\n def rawDownMultiplier(self):\n return self._rawDownMultiplier\n\n @property\n def rawUpMultiplier(self):\n return self._rawUpMultiplier\n\n @property\n def seeders(self):\n return self._seeders\n\n @property\n def leechers(self):\n return self._leechers\n\n @property\n def episode(self):\n return self._episode\n\n @property\n def score(self):\n return self._selectorScore\n\n @score.setter\n def score(self, value):\n self._selectorScore = value\n\n def __repr__(self):\n return '%s - %s (S: %s | L: %s)' % (self.episode, self.properties, self.seeders, self.leechers)\n" }, { "alpha_fraction": 0.5289891958236694, "alphanum_fraction": 0.5382247567176819, "avg_line_length": 24.311687469482422, "blob_id": "f602f67f24e090d15101d9990478a7765be28e46", "content_id": "b4d0febce0f60ed155e09e8d72a5a9e424d3c328", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1949, "license_type": "no_license", "max_line_length": 101, "num_lines": 77, "path": "/rui/core/common.py", "repo_name": "twissell-/rui", "src_encoding": "UTF-8", "text": "import logging\nimport time\n\nfrom enum import Enum\n\nlogger = logging.getLogger(__name__)\n\n\ndef format_bytes(size):\n power = 2 ** 10\n n = 0\n power_labels = {0 : '', 1: 'K', 2: 'M', 3: 'G', 4: 'T'}\n while size > power:\n size /= power\n n += 1\n return round(size, 2), power_labels[n] + 'B'\n\n\ndef timed(func):\n \"\"\"\n Decorator to log execution time of decorated methods methods\n \"\"\"\n\n _logger = logging.getLogger(__name__ + '.Timed')\n\n def wrapped(*args, **kwargs):\n start = time.time()\n res = func(*args, **kwargs)\n methodName = type(args[0]).__name__ + '.' 
+ func.__name__\n _logger.info('Executed {method} in {time} seconds.'\n .format(method=methodName, time=(time.time() - start)))\n return(res)\n\n return wrapped\n\n\nclass MediaFormat(Enum):\n TV = 1\n MOVIE = 2\n SPECIAL = 3\n OVA = 4\n ONA = 5\n OTHER = 9\n\n def __sub__(self, other):\n return self.value - other.value\n\n @staticmethod\n def map(value):\n if value == 'TV':\n return MediaFormat.TV\n elif value == 'TV_SHORT':\n return MediaFormat.TV\n elif value == 'MOVIE':\n return MediaFormat.MOVIE\n elif value == 'SPECIAL':\n return MediaFormat.SPECIAL\n elif value == 'OVA':\n return MediaFormat.OVA\n elif value == 'ONA':\n return MediaFormat.ONA\n elif value == 'TV Series':\n return MediaFormat.TV\n elif value == 'Movie':\n return MediaFormat.MOVIE\n elif value == 'TV Special':\n return MediaFormat.SPECIAL\n elif value == 'DVD Special':\n return MediaFormat.SPECIAL\n elif value == 'BD Special':\n return MediaFormat.SPECIAL\n else:\n return MediaFormat.OTHER\n\n\ndef sanitize(string):\n return string.replace('?','').replace('.', '').replace(':', '').replace('/', ' ').replace('\"','')\n" }, { "alpha_fraction": 0.7049180269241333, "alphanum_fraction": 0.7049180269241333, "avg_line_length": 45.400001525878906, "blob_id": "5aace0180dca652f0190ddf6678f213b6db173dd", "content_id": "711e840a4b16ee5ab78afe85f6c2249844cdeb57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1159, "license_type": "no_license", "max_line_length": 121, "num_lines": 25, "path": "/rui/core/torrentClient.py", "repo_name": "twissell-/rui", "src_encoding": "UTF-8", "text": "import transmissionrpc\nimport qbittorrent\n\nfrom core import config\n\nclass TorrentClient(object):\n def __init__(self, clientConfig=config.get('downloads.torrentClient')):\n super(TorrentClient, self).__init__()\n self._client = transmissionrpc.Client(**clientConfig)\n\n def add(self, torrentFile, destinationPath):\n paused = config.get('downloads.startPaused')\n self._client.add_torrent(torrentFile, download_dir=destinationPath, paused=paused)\n\n\nclass QBitTorrentClient(object):\n def __init__(self, clientConfig=config.get('downloads.torrentClient')):\n super(QBitTorrentClient, self).__init__()\n self._client = qbittorrent.Client('http://%s:%s' % (clientConfig['address'], clientConfig['port']))\n self._client.login(clientConfig['user'], clientConfig['password'])\n\n def add(self, torrentFile, destinationPath, name='', category='', tags=''):\n self._client.set_preferences(\n start_paused_enabled=config.get('downloads.startPaused'), content_layout='NoSubfolder')\n self._client.download_from_link(torrentFile, savepath=destinationPath, rename=name, category=category, tags=tags)" }, { "alpha_fraction": 0.5689178705215454, "alphanum_fraction": 0.571090042591095, "avg_line_length": 35.49629592895508, "blob_id": "97de44e181cc5d171364d0dc2afeb6b8b107a8ee", "content_id": "50eb4e4d2598582f3ef60bf9d0e9a62974f053ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5064, "license_type": "no_license", "max_line_length": 167, "num_lines": 135, "path": "/rui/commands/report.py", "repo_name": "twissell-/rui", "src_encoding": "UTF-8", "text": "import os\r\nfrom tabulate import tabulate\r\n\r\nfrom .command import (\r\n Command,\r\n CommandOutput,\r\n)\r\n\r\nfrom core.common import sanitize\r\nfrom core import (\r\n fileManager,\r\n anilist,\r\n config\r\n)\r\n\r\n\r\nclass Report(Command):\r\n \"\"\"Searchs torrents for new episodes for each anime 
in watching list and add them to transmission.\"\"\"\r\n\r\n def configureParameters(self, parser):\r\n super().configureParameters(parser)\r\n parser.description = 'Reports over your anime collection.'\r\n group = parser.add_mutually_exclusive_group()\r\n group.add_argument('--missing-anime', action='store_true', help='List completed and not downloaded Anime with score above or equal \"reports.saveMinimunScore\"')\r\n group.add_argument('--anime-to-purge', action='store_true', help='List downloaded Anime with score below \"reports.saveMinimunScore\"')\r\n group.add_argument('--destination', action='store_true', help='List all completed anime and where its/should be downloaded')\r\n\r\n return parser\r\n\r\n def _misingAnimeReport(self, args):\r\n args.verbose = True\r\n headers= [\r\n 'ID',\r\n 'Title',\r\n 'Score',\r\n 'Location',\r\n \"Missing\"\r\n ]\r\n output = []\r\n animeToCheck = [anime for anime in anilist.getCompletedListByUsername(config.get('anilist.username')) if anime.score >= config.get('reports.saveMinimunScore')]\r\n\r\n for anime in sorted(animeToCheck):\r\n searchDirs = [fileManager.getDestinationPath(anime)]\r\n searchDirs += [fileManager.getDestinationPath(anime, basePath=dir_) for dir_ in config.get('reports.alternativeDirecories')]\r\n\r\n row = []\r\n for dir_ in searchDirs:\r\n missingEpisodes = fileManager.getMissingEpisodes(anime, dir_)\r\n if not missingEpisodes:\r\n row = []\r\n break\r\n else:\r\n row = [anime.id, anime.title, anime.score]\r\n row.append(dir_)\r\n if len(missingEpisodes) > 12:\r\n row.append(str(missingEpisodes[:12]) + '+')\r\n else:\r\n row.append(missingEpisodes)\r\n\r\n if row: output.append(row)\r\n\r\n return CommandOutput(exit_status=0, message=(output, headers))\r\n\r\n def _animeToPurgeReport(self, args):\r\n headers= [\r\n 'ID',\r\n 'Title',\r\n 'Score',\r\n 'Location',\r\n \"Episodes\"\r\n ]\r\n output = []\r\n animeToCheck = [anime for anime in anilist.getCompletedListByUsername(config.get('anilist.username')) if anime.score < config.get('reports.saveMinimunScore')]\r\n\r\n for anime in sorted(animeToCheck):\r\n searchDirs = [fileManager.getDestinationPath(anime)]\r\n searchDirs += [fileManager.getDestinationPath(anime, basePath=dir_) for dir_ in config.get('reports.alternativeDirecories')]\r\n\r\n for dir_ in searchDirs:\r\n episodes = fileManager.getEpisodes(anime, dir_)\r\n if episodes:\r\n row = [anime.id, anime.title, anime.score]\r\n row.append(dir_)\r\n row.append(len(episodes))\r\n output.append(row)\r\n\r\n\r\n return CommandOutput(exit_status=0, message=(output, headers))\r\n\r\n def _destinationReport(self, args):\r\n headers= [\r\n 'ID',\r\n 'Title',\r\n 'Score',\r\n 'Destination',\r\n \"Episodes\"\r\n ]\r\n output = []\r\n\r\n for anime in sorted(anilist.getCompletedListByUsername(config.get('anilist.username'))):\r\n searchDirs = [fileManager.getDestinationPath(anime)]\r\n searchDirs += [fileManager.getDestinationPath(anime, basePath=dir_) for dir_ in config.get('reports.alternativeDirecories')]\r\n\r\n row = [anime.id, anime.title, anime.score]\r\n destination = fileManager.getDestinationPath(anime)\r\n episodeCount = 0\r\n for dir_ in searchDirs:\r\n episodes = fileManager.getEpisodes(anime, dir_)\r\n if episodes:\r\n destination = dir_\r\n episodeCount = len(episodes)\r\n\r\n row.append(destination)\r\n row.append(episodeCount)\r\n output.append(row)\r\n\r\n return CommandOutput(exit_status=0, message=(output, headers))\r\n\r\n\r\n def _execute(self, args):\r\n if args.missing_anime:\r\n return 
self._misingAnimeReport(args)\r\n if args.anime_to_purge:\r\n return self._animeToPurgeReport(args)\r\n if args.destination:\r\n return self._destinationReport(args)\r\n\r\n return CommandOutput(exit_status=1, message='No report selected')\r\n\r\n def _formatOutput(self, output):\r\n if output.exit_status == 1:\r\n print(output.message)\r\n else:\r\n print(tabulate(*output.message, tablefmt=\"psql\"))\r\n print('%d rows.' % len(output.message[0]))\r\n\r\n" }, { "alpha_fraction": 0.6634241342544556, "alphanum_fraction": 0.6634241342544556, "avg_line_length": 24.947368621826172, "blob_id": "c4efae713b7839e825f5c17504aedae6a5db83df", "content_id": "006d5847f2eb6c61b5a019f4d865bd35f4cd83b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 514, "license_type": "no_license", "max_line_length": 105, "num_lines": 19, "path": "/rui/commands/clearCache.py", "repo_name": "twissell-/rui", "src_encoding": "UTF-8", "text": "\r\nfrom .command import (\r\n Command,\r\n CommandOutput,\r\n)\r\nfrom core.anilist import AnilistCache\r\n\r\nclass ClearCache(Command):\r\n \"\"\"Searchs torrents for new episodes for each anime in watching list and add them to transmission.\"\"\"\r\n\r\n def configureParameters(self, parser):\r\n super().configureParameters(parser)\r\n parser.description = 'Deletes all cache files.'\r\n\r\n return parser\r\n\r\n def _execute(self, args):\r\n AnilistCache.clearCache()\r\n\r\n return CommandOutput()\r\n" }, { "alpha_fraction": 0.6466019153594971, "alphanum_fraction": 0.6576051712036133, "avg_line_length": 26.105262756347656, "blob_id": "cc8d2203f26464fa3a7439c96cd6590935d9150d", "content_id": "9bccd5bd9f2976f8ecdcdec5b5664b77dccb716b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1545, "license_type": "no_license", "max_line_length": 129, "num_lines": 57, "path": "/rui/rui.py", "repo_name": "twissell-/rui", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport logging\nimport argparse\nimport inspect\nfrom logging.handlers import RotatingFileHandler\n\nfrom core import (\n anilist,\n animebytes,\n selector,\n fileManager,\n config\n)\nfrom core.torrentClient import TorrentClient, QBitTorrentClient\nimport commands\nfrom pprint import pprint\n\nroot_logger = logging.getLogger('')\nroot_logger.setLevel(logging.DEBUG)\n\nloghandler = logging.handlers.RotatingFileHandler(\n './rui.log', maxBytes=100 * 1024 * 1024, backupCount=3)\nformatter = logging.Formatter(\n '%(asctime)s %(levelname)s - %(name)s ln.%(lineno)d - %(message)s')\n\nloghandler.setFormatter(formatter)\nroot_logger.addHandler(loghandler)\n\nlogger = logging.getLogger('cli')\n\ndef main():\n parser = argparse.ArgumentParser(prog='rui', add_help=True)\n subparsers = parser.add_subparsers()\n\n for name in dir(commands):\n if not (name[-2:] == '__' or name == 'command'):\n command_class = getattr(commands, name)\n if inspect.ismodule(command_class):\n continue\n\n if issubclass(command_class, commands.command.Command):\n command = command_class()\n command.configureParameters(subparsers.add_parser(name[0].lower() + name[1:])).set_defaults(func=command.execute)\n\n args = parser.parse_args()\n if 'func' in dir(args):\n args.func(args)\n else:\n parser.print_help()\n\nif __name__ == '__main__':\n try:\n main()\n except Exception as err:\n logger.error(err, exc_info=True)\n exit(1)\n" }, { "alpha_fraction": 0.5530791878700256, "alphanum_fraction": 0.5563049912452698, "avg_line_length": 
42.25973892211914, "blob_id": "6f417ba122979b99bd63801290bdd59a1e79ef7a", "content_id": "b5e003bbaf01e1f40192f1bd7370755f5e17ef12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3410, "license_type": "no_license", "max_line_length": 138, "num_lines": 77, "path": "/rui/commands/downloadWatching.py", "repo_name": "twissell-/rui", "src_encoding": "UTF-8", "text": "\r\nfrom .command import (\r\n Command,\r\n CommandOutput,\r\n)\r\nfrom core import (\r\n fileManager,\r\n anilist,\r\n config,\r\n selector,\r\n animebytes\r\n)\r\nfrom core.torrentClient import TorrentClient, QBitTorrentClient\r\n\r\n\r\n\r\nclass DownloadWatching(Command):\r\n \"\"\"Searchs torrents for new episodes for each anime in watching list and add them to transmission.\"\"\"\r\n\r\n def configureParameters(self, parser):\r\n super().configureParameters(parser)\r\n parser.description = 'Searchs torrents for new episodes for each anime in watching list and add them to transmission.'\r\n parser.add_argument('--dry-run', action='store_true', help='Do not add the torrent to transmission.')\r\n parser.add_argument('--id', action='store', default=False, help='Process only anime with the given anilist id. Useful for debug.')\r\n\r\n return parser\r\n\r\n def _execute(self, args):\r\n # def process(isDryRun=False, verbose=False):\r\n output = ''\r\n tc = QBitTorrentClient()\r\n for anime in anilist.getWatchingListByUsername(config.get('anilist.username')):\r\n if anime.notes and 'rui.ignore' in anime.notes:\r\n continue\r\n if args.id and anime.id != int(args.id):\r\n continue\r\n if args.verbose: \r\n output += ('=' * 120 + '\\n')\r\n output += ('Anime:%s' % anime + '\\n')\r\n output += ('Destination: %s' % fileManager.getDestinationPath(anime, True) + '\\n')\r\n\r\n missingEpisodes = fileManager.getMissingEpisodes(anime)\r\n if not missingEpisodes:\r\n if args.verbose: output += ('All episodes of %s have been downloaded.\\n' % anime.title)\r\n continue\r\n if args.verbose: output += ('Missing Episodes: %s' % missingEpisodes + '\\n')\r\n\r\n collection = selector.selectCollection(anime, animebytes.getTorrentCollectionByAnime(anime))\r\n if not collection:\r\n if args.verbose: output += ('No torrent collection found for \"%s\"' % anime.title + '\\n')\r\n continue\r\n if args.verbose: output += ('Collection: %s' % collection + '\\n')\r\n\r\n torrents = selector.selectTorrentFromCollection(anime, collection, missingEpisodes)\r\n if not torrents:\r\n if args.verbose: output += ('No torrents found for \"%s\" %s' % (anime.title, str(missingEpisodes)) + '\\n')\r\n continue\r\n\r\n #tc = TorrentClient()\r\n tc = QBitTorrentClient()\r\n for torrent in torrents:\r\n if args.verbose: output += ('Torrent: %s' % torrent + '\\n')\r\n if not args.dry_run:\r\n if torrent.episode:\r\n detail = ' - %03d' % torrent.episode\r\n elif anime.episodes > 1:\r\n detail = ' - %03d-%03d' % (anime.firstEpisode, anime.lastEpisode)\r\n else:\r\n detail = ''\r\n\r\n name = anime.title + detail\r\n category = 'Anime'\r\n tags = ','.join(['rui', 'Ongoing' if torrent.episode else 'Finished'])\r\n tc.add(\r\n torrent.url, fileManager.getDestinationPath(anime, True),\r\n name=name, category=category, tags=tags)\r\n\r\n return CommandOutput(exit_status=0, message=output)\r\n" }, { "alpha_fraction": 0.7868852615356445, "alphanum_fraction": 0.7868852615356445, "avg_line_length": 39.66666793823242, "blob_id": "3026f0b27783a8121dad44f44949147ddb180c5f", "content_id": "b8cf359502d36d6616a33139de88465f344460b5", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 122, "license_type": "no_license", "max_line_length": 114, "num_lines": 3, "path": "/README.md", "repo_name": "twissell-/rui", "src_encoding": "UTF-8", "text": "# Rui\n\nRui is ment to be a bridge between my Anilist watching list and my AnimeBytes account to feed my anime collection.\n" }, { "alpha_fraction": 0.6163420081138611, "alphanum_fraction": 0.6187770366668701, "avg_line_length": 33.532711029052734, "blob_id": "0eb291dcfa22473605824c96679c9fad9d0c5a3e", "content_id": "0bb102e3d525cde89b9594a0845767f77b0cbc44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3696, "license_type": "no_license", "max_line_length": 109, "num_lines": 107, "path": "/rui/core/selector.py", "repo_name": "twissell-/rui", "src_encoding": "UTF-8", "text": "from Levenshtein import distance\nfrom core import config\nimport logging\n\nfrom pprint import pprint\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\ndef _title_comparator(listEntry, torrentCollection):\n try:\n collection_values = torrentCollection.synonymns + [torrentCollection.title]\n list_values = [\n listEntry.english,\n listEntry.romaji,\n listEntry.native\n ]\n return min([distance(a.lower(),b.lower()) for a in collection_values if a for b in list_values if b])\n except AttributeError as err:\n print(err)\n print(torrentCollection.synonymns)\n print(torrentCollection.title)\n exit()\n\n\n\ndef selectCollection(listEntry, torrentCollections):\n logger.info('Selecting torrent collection for \"%s' % listEntry.title)\n best = None\n for collection in torrentCollections:\n logger.debug('Analizing collection: \"%s\"' % collection.title)\n collection.score = - (\n 0.1 * abs(listEntry.format - collection.format)\n + _title_comparator(listEntry, collection) \n + 5 * abs(listEntry.year - collection.year))\n logger.debug('Collection score: \"%f\"' % collection.score)\n\n if not best:\n best = collection\n logger.debug('New best: \"%s\"' % best)\n elif best.score < collection.score:\n best = collection\n logger.debug('New best: \"%s\"' % best)\n if best:\n logger.info('Best: %s (score: %f)' % (best, best.score))\n return best\n\n\ndef _selectTorrentFromCollection(collection, filters, episode_filter=0):\n best = None\n for torrent in collection.torrents:\n logger.debug('Analizing torrent: %s' % torrent)\n torrent.score = 0\n\n if episode_filter and episode_filter != torrent.episode:\n continue\n\n if any(value.lower() in torrent.properties.lower() for value in filters.get('exclude')):\n logger.debug('Excluded: \"%s\"' % torrent)\n continue\n\n for value, modificator in filters.get('prefer').items():\n if value.lower() in torrent.properties.lower():\n logger.debug('Prefered \"%s\" prensent, +%f' % (value, modificator))\n torrent.score += modificator\n\n seedScore = 0.001 * torrent.seeders\n logger.debug('Seeeders score +%f' % seedScore)\n torrent.score += seedScore\n\n logger.debug('Final score: %f' % torrent.score)\n if not best:\n best = torrent\n logger.debug('New best: \"%s\"' % best)\n elif best.score < torrent.score:\n best = torrent\n logger.debug('New best: \"%s\"' % best)\n\n if best:\n logger.info('Best: %s (score: %f)' % (best, best.score))\n return best\n else:\n logger.info('No torrents found for \"%s\" (episode %d)' % (collection.title, episode_filter))\n\n\n\ndef selectTorrentFromCollection(listEntry, collection, missingEpisodes):\n rtn = []\n\n if not 
listEntry.ongoing and listEntry.episodes == len(missingEpisodes):\n logger.debug('Using finished filters')\n torrent = _selectTorrentFromCollection(collection, config.get('downloads.filters.finished'))\n if torrent:\n rtn.append(torrent)\n else:\n logger.debug('Using ongoing filters')\n logger.debug('Missing episodes: %s' % missingEpisodes)\n filters = config.get('downloads.filters.ongoing')\n\n for episode in missingEpisodes:\n logger.debug('Getting episode %d ' % episode)\n torrent = _selectTorrentFromCollection(collection, filters, episode)\n if torrent:\n rtn.append(torrent)\n\n return rtn\n\n" }, { "alpha_fraction": 0.596666693687439, "alphanum_fraction": 0.596666693687439, "avg_line_length": 30.578947067260742, "blob_id": "04c22e5703600610aa2744a97ea313f5b8e741a1", "content_id": "c72d15902c43ecf607b498d13a9443351ce466f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 600, "license_type": "no_license", "max_line_length": 109, "num_lines": 19, "path": "/rui/core/config.py", "repo_name": "twissell-/rui", "src_encoding": "UTF-8", "text": "import json\n\n_CONFIG_PATH = './config.json'\n\ndef get(identifier):\n try:\n config_file = open(_CONFIG_PATH)\n data = json.load(config_file)\n for key in identifier.split('.'):\n data = data.get(key)\n if not data:\n return data\n except OSError as err:\n exit('ERROR: Error reading file: \"{configFile}\". {error}'.format(configFile=_CONFIG_PATH, error=err))\n except json.JSONDecodeError as err:\n print(err)\n exit('ERROR: Error deconding file: \"{configFile}\"'.format(configFile=_CONFIG_PATH))\n else:\n return data\n" } ]
15
anthonygrees/lambda_codecommit
https://github.com/anthonygrees/lambda_codecommit
8648fa162de499db1cc02a23b17256d5959f2e80
9e917a1657ab3e2b5b71c3b7bdda1f797ea4a5a9
41311070a31c7abef59e7fab3611104cdf912c4a
refs/heads/main
2023-03-21T21:45:25.993295
2021-03-17T01:07:18
2021-03-17T01:07:18
344,665,249
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7421524524688721, "alphanum_fraction": 0.7506726384162903, "avg_line_length": 48.42222213745117, "blob_id": "e23bba28cd6888fbfe0164e589769cccfbd88ace", "content_id": "4d919c58718f3dd0a52e45728c787bef4d0ef21a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2230, "license_type": "permissive", "max_line_length": 255, "num_lines": 45, "path": "/AWS_CodeCommit_App.md", "repo_name": "anthonygrees/lambda_codecommit", "src_encoding": "UTF-8", "text": "# AWS CodeCommit App for Splunk\n \n### Description\nThe app shows users code commits and branch details as events from AWS CodeCommit. \n \n![UI](https://github.com/anthonygrees/lambda_codecommit/blob/main/images/ui.png)\n \n### Feeding Data\nData is sent via an Event Trigger from AWS CodeCommit. The Event Trigger is monitored by a Lambda Function which sends the `json` output to a Splunk HEC (HTTP Event Collector) end point. \n \n### Setup the AWS CodeCommit App\nThe following steps will ensure the app has data: \n \n#### Step 1: Install the Application\nYou can install the app from the command line or from SplunkBase. \n \nTo install via the CLI, run the following command. \n```bash\nsudo /opt/splunk/bin/splunk install app /tmp/awscodecommit.tgz -auth user:password\n```\n \n#### Step 2: Deploy the Lambda Application\nNavigate to the [codecommit-to-hec-splunk](https://serverlessrepo.aws.amazon.com/applications/us-east-1/457777705445/codecommit-to-splunk-hec) lambda application and click the deploy button. \n \n![UI](https://github.com/anthonygrees/lambda_codecommit/blob/main/images/lambda.png)\n \n#### Step 3: Add a Trigger to the Lambda Function\nAdd a trigger to the Lambda function you just deployed and attach it to you AWS CodeCommit repository. \n \n![Trigger](https://github.com/anthonygrees/lambda_codecommit/blob/main/images/trigger.png)\n \n#### Step 4: Add new Data Input in Splunk\nIn your Splunk instance, go to `Settings -> Data Inputs` and choose `HTTP Event Collector` then add the following. \n- Default Index set to `Main`. \n- Source Type set to `awscodecommit`. \n \n![Data Input](https://github.com/anthonygrees/lambda_codecommit/blob/main/images/data-input.png)\n \n \nBe sure to copy the `token` created as you will need this for the Environment Variables in Step 5. \n \n#### Step 5: Create Environment Variables for the Lambda Function\nIn order for the Lambda function to send the events triggered from your code commits to you Splunk instance, it needs to know `how` to connect to the HTTP Event Collector end point. The connection details are stored in the Lambda Environment variables. 
\n \n![Env Vars](https://github.com/anthonygrees/lambda_codecommit/blob/main/images/env-var.png)\n \n \n" }, { "alpha_fraction": 0.6383673548698425, "alphanum_fraction": 0.6489796042442322, "avg_line_length": 40.52542495727539, "blob_id": "447e331342107e0b515684335646e3882f3d9823", "content_id": "b8245b21962c4809e11db05316fb55e0ae4df1d2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2450, "license_type": "permissive", "max_line_length": 365, "num_lines": 59, "path": "/code/lambda_function.py", "repo_name": "anthonygrees/lambda_codecommit", "src_encoding": "UTF-8", "text": "import json\nimport urllib3\nimport os\nimport ssl\nimport boto3\nimport argparse\n\ncodecommit = boto3.client('codecommit')\nregion = codecommit.meta.region_name\n\ndef lambda_handler(event, context):\n #Parse URL Parameters\n http_method_a = os.environ[\"http_method\"]\n http_method = str(http_method_a)\n url_a = os.environ[\"url\"]\n url = str(url_a)\n port_a = os.environ[\"port\"]\n port = str(port_a)\n token_a = os.environ[\"token\"]\n token = str(token_a)\n\n #Build Full URL out of Parameters\n full_url = http_method+'://'+url+':'+port+'/services/collector/raw'\n \n #Do not check SSL certs\n cert_reqs = ssl.CERT_NONE\n urllib3.disable_warnings()\n http = urllib3.PoolManager(cert_reqs = cert_reqs)\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n \n #Log the updated references from the event\n references = { reference['ref'] for reference in event['Records'][0]['codecommit']['references'] }\n \n #Get the repository from the event and show its git clone URL\n repository = event['Records'][0]['eventSourceARN'].split(':')[5]\n region = event['Records'][0]['eventSourceARN'].split(\":\")[3]\n branch = event['Records'][0]['codecommit']['references'][0]['ref'].split(\"/\")[2];\n source_version = event['Records'][0]['codecommit']['references'][0]['commit']\n commit_time = event['Records'][0]['eventTime']\n eventTriggerName = event['Records'][0]['eventTriggerName']\n userIdentityARN = event['Records'][0]['userIdentityARN'].split(':')[5]\n ##print(\"*** Event is %s\" % (event))\n \n try:\n response = codecommit.get_repository(repositoryName=repository)\n response1 = '{\"AWSCodeCommit\": \"data\", \"Repository_HTTP\": \"'+response['repositoryMetadata']['cloneUrlHttp']+'\", \"References\": \"'+str(references)+'\", \"Region\": \"'+region+'\", \"Branch\": \"'+branch+'\", \"Source_Version\": \"'+source_version+'\", \"Commit_Time\": \"'+commit_time+'\", \"EventTriggerName\": \"'+eventTriggerName+'\", \"UserIdentityARN\": \"'+userIdentityARN+'\"}'\n \n except Exception as e:\n print(e)\n print('Error getting repository {}. 
Make sure it exists and that your repository is in the same region as this function.'.format(repository))\n raise e\n\n #Post Event\n r = http.request('POST', full_url, body=response1, headers={'Content-Type': 'application/json', 'Authorization':'Splunk '+token})\n\n return {\n 'statusCode': 200,\n 'body':'Success'\n }\n" }, { "alpha_fraction": 0.7561797499656677, "alphanum_fraction": 0.7584269642829895, "avg_line_length": 43.5, "blob_id": "46f85c283dfd9b26ad1c371981b937301afecc44", "content_id": "66873a1822bd3d186ee35eb26808e3fff66b8664", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 890, "license_type": "permissive", "max_line_length": 234, "num_lines": 20, "path": "/README.md", "repo_name": "anthonygrees/lambda_codecommit", "src_encoding": "UTF-8", "text": "# AWS CodeCommit to Splunk HEC Serverless Application\n \n### About\nThis application was designed to be a generic approach for collecting AWS CodeCommit events and forwarding them onto a HEC (HTTP-Event-Collector) Endpoint in Splunk. \n\nBelow is the current README for the serverless function. \n\n### CodeCommit-to-Splunk-HEC\n\nThis application will create a lambda function, with an API Gateway trigger. When deployed, you can use the API Gateway UR and \"webhook-to-hec\" endpoint, along with Environment Variables, to send data from AWS CodeCommit into Splunk. \n\n### Environment Variables defined in the Lambda Function:\n - `url` - the FQDN of your Splunk Server\n - `http_method` - whether you are running HEC with SSL enabled or not. You'll provide either http, or https.\n - `port` - the port you are running HEC on.\n - `token` - your HEC Token. \n\n### License\n\nApache License 2.0 (undefined)\n" } ]
3
pochnsong/drestful
https://github.com/pochnsong/drestful
ddbc1e2daf0aaf3e6f1a896c1441d9c900042e8e
6086599bd0b92d0bb219dbce5e0792bc9b371f90
9cc312121d6a9a0d358e28573ac79ed50c33a56b
refs/heads/master
2020-03-28T03:05:53.169121
2018-09-06T05:00:44
2018-09-06T05:00:44
147,619,976
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5219805836677551, "alphanum_fraction": 0.5224432945251465, "avg_line_length": 24.880239486694336, "blob_id": "babf59df9f2fe196219e4713d78321b6e3fc9142", "content_id": "49018ea19d2dc815b791d48504348b156cde8bd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4322, "license_type": "no_license", "max_line_length": 71, "num_lines": 167, "path": "/api.py", "repo_name": "pochnsong/drestful", "src_encoding": "UTF-8", "text": "from django.shortcuts import HttpResponse, redirect\nfrom django.views.generic.edit import View, ModelFormMixin, CreateView\nimport json\n\n\nclass JsonResponse(HttpResponse):\n def __init__(self, content, *args, **kwargs):\n if isinstance(content, (dict, list)):\n content = json.dumps(content)\n kwargs[\"content_type\"] = \"application/json\"\n super().__init__(content, *args, **kwargs)\n\n\nclass JsonpResponse(HttpResponse):\n def __init__(self, content, callback_fn=\"callback\", **kwargs):\n if isinstance(content, (dict, list)):\n content = json.dumps(content)\n\n content = \"%s(%s)\" % (callback_fn, content)\n super().__init__(content, **kwargs)\n\n\nclass DetailAPI(View):\n model = None\n list_display = ()\n default_filter = {}\n filter_fields = ()\n result_name = 'object'\n jsonp = None\n\n def get_list_display(self):\n return self.list_display\n\n def get_data(self, obj):\n res = {}\n\n for k in self.get_list_display():\n fn = None\n if isinstance(k, tuple) or isinstance(k, list):\n fn = k[1]\n k = k[0]\n\n if fn:\n res[k] = fn(obj)\n else:\n _v = getattr(obj, k, '')\n if not isinstance(_v, bool):\n _v = str(_v)\n res[k] = _v\n\n return res\n\n def get_filter(self):\n _filter = self.default_filter.copy()\n for k in self.filter_fields:\n if k in self.request.GET:\n _v = self.request.GET.get(k)\n _filter[k] = None if _v == 'None' else _v\n\n return _filter\n\n def get_result(self):\n obj = self.model.objects.filter(**self.get_filter()).last()\n\n res = {\n self.result_name: self.get_data(obj)\n }\n\n return res\n\n def get(self, request):\n res = self.get_result()\n if self.jsonp:\n fn = request.GET.get(self.jsonp)\n if fn:\n return JsonpResponse(res, fn)\n\n return JsonResponse(res)\n\n\nclass ListAPI(DetailAPI):\n def get_result(self):\n object_list = self.model.objects.filter(**self.get_filter())\n res_list = []\n for obj in object_list:\n res_list.append(self.get_data(obj))\n\n res = {\n self.result_name: res_list\n }\n\n return res\n\n\nclass CreateAPI(View):\n model = None\n\n default_fields = {}\n fields = []\n\n def _clean_field(self, k):\n value = None\n if k in self.request.POST:\n value = self.request.POST.get(k)\n elif k in self.request.FILES:\n value = self.request.FILES.get(k)\n\n if value == 'None':\n value = None\n return value\n\n def get_create_data(self):\n res = self.default_fields.copy()\n\n for k in self.fields:\n if k in self.request.POST or k in self.request.FILES:\n clean_fn = getattr(self, 'clean_%s' % k, None)\n if callable(clean_fn):\n res[k] = clean_fn()\n else:\n res[k] = self._clean_field(k)\n\n return res\n\n def success(self):\n return JsonResponse({'msg': 'ok', 'id': self.object.pk})\n\n def failed(self):\n content = json.dumps({'msg': 'failed', 'err': str(self.error)})\n return JsonResponse(content)\n\n def post(self, request, **kwargs):\n data = self.get_create_data()\n\n try:\n self.object = self.model(**data)\n self.object.save()\n\n except Exception as e:\n #print('error', e)\n self.error = e\n return self.failed()\n\n return 
self.success()\n\n\nclass DeleteAPI(View):\n model = None\n filter_fields = []\n default_filter = {}\n\n def get_filter(self):\n _filter = self.default_filter.copy()\n for k in self.filter_fields:\n if k in self.request.POST:\n _v = self.request.POST.get(k)\n _filter[k] = None if _v == 'None' else _v\n\n return _filter\n\n def post(self, request):\n # print(request)\n _filter = self.get_filter()\n if _filter:\n object_list = self.model.objects.filter(**_filter).delete()\n # print(object_list)\n return JsonResponse({})\n" } ]
1
hermosa-circulo/hermosa_circulo
https://github.com/hermosa-circulo/hermosa_circulo
c3444d5de703c17d3cfbe9d56f01d106c4ed1345
0360b4b302ce6a29684c7a2084a9744b74358a79
56d5adbb4e1cd79788542a06603a1fcdc30fe72c
refs/heads/master
2021-06-20T22:13:00.514741
2017-07-24T14:40:32
2017-07-24T14:41:07
46,569,740
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5693215131759644, "alphanum_fraction": 0.5929203629493713, "avg_line_length": 23.214284896850586, "blob_id": "92ca712ef184ac54f4ed635fc9809de9dd694bbb", "content_id": "85a3f10fdaf52feb027164d2c4b062e4ec830c98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 339, "license_type": "no_license", "max_line_length": 100, "num_lines": 14, "path": "/app/utils/bb/load_execute.py", "repo_name": "hermosa-circulo/hermosa_circulo", "src_encoding": "UTF-8", "text": "import os\na = 0\nLoad = \"ok\"\nwhile a==0:\n\tfp = open(\"change.txt\",'r')\n\ttemp = fp.readlines()\n\tfor i in range(len(temp)):\n\t\ttemp[i] = temp[i].rstrip()\n\tif Load != temp[0]:\n\t\tLoad = temp[0]\n\t\tstr_temp = \"blender --background --python Lattice.py \"+temp[1]+\" \"+temp[2]+\" \"+temp[3]+\" \"+temp[4]\n\t\tos.system(str_temp)\n\t\tprint str_temp\n\tfp.close()\n" }, { "alpha_fraction": 0.5129310488700867, "alphanum_fraction": 0.557758629322052, "avg_line_length": 27.743589401245117, "blob_id": "257503e6c0892a6c110f18bd646cda77004f132d", "content_id": "9d5418f880004b325d3f59ac984bfdd8c342e922", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1160, "license_type": "no_license", "max_line_length": 105, "num_lines": 39, "path": "/app/utils/Cylinder/Vertex.py", "repo_name": "hermosa-circulo/hermosa_circulo", "src_encoding": "UTF-8", "text": "import math\r\nimport Closoid\r\n\r\ndef ring(r,point_num,center):\r\n array_v = []\r\n theata = 0\r\n fineness = 20\r\n count = 0\r\n while True:\r\n x = r * math.cos(math.pi*fineness*count/360) + center[0]\r\n y = r * math.sin(math.pi*fineness*count/360) + center[1]\r\n z = center[2]\r\n array_v.append([x,y,z])\r\n if 720 == fineness*count:\r\n break\r\n count = count + 1\r\n return array_v\r\n\r\ndef makeR(x,r,p):\r\n x = x*p\r\n if r > x:\r\n \ty = math.sqrt(math.pow(r,2) - math.pow(x,2))\r\n else: \r\n\t y = 0.5\r\n return y\r\n\r\ndef makeV(wheel_radius,begining_point,begin,point_num,breast_wide):\r\n vertex=[]\r\n first_point = Closoid.Closoid((math.pi/200)*(0+begining_point))\r\n first_p2 = Closoid.Closoid((math.pi/200)*(begin+begining_point))\r\n\r\n for i in range(150):\r\n r = makeR(i,wheel_radius,breast_wide)\r\n point = Closoid.Closoid((math.pi/200)*(i+begining_point))\r\n center = [0,-(point[0]-first_point[0]-first_p2[0])*100,(point[1]-first_point[1]-first_p2[1])*100]\r\n if begin < i and r > 0.5:\r\n vertex.append(ring(r,point_num,center))\r\n\r\n return vertex\r\n" }, { "alpha_fraction": 0.6587436199188232, "alphanum_fraction": 0.68222975730896, "avg_line_length": 30.828828811645508, "blob_id": "fefaf757b72491dea7ade283fdbb7fed077d3509", "content_id": "46744b9f8f0f57f11b11c1eb91fc5b41bcfb144b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3534, "license_type": "no_license", "max_line_length": 196, "num_lines": 111, "path": "/app/utils/bb/Lattice.py", "repo_name": "hermosa-circulo/hermosa_circulo", "src_encoding": "UTF-8", "text": "import sys\nimport bpy\nimport os\ndef delete_all():\n\tfor item in bpy.context.scene.objects:\n\t\tbpy.context.scene.objects.unlink(item)\n\tfor item in bpy.data.objects:\n\t\tbpy.data.objects.remove(item)\n\tfor item in bpy.data.meshes:\n\t\tbpy.data.meshes.remove(item)\n\tfor item in bpy.data.materials:\n\t\tbpy.data.materials.remove(item)\ndef createLattice(scene,argvs):\n\tlattice = bpy.data.lattices.new(\"Lattice\")\n\tlattice_ob = 
bpy.data.objects.new(\"LatticeObj\",lattice)\n\tlattice_ob.location = (0.55,-0.63,0.5)\n\t#lattice_ob.show_x_ray = True\n\tfor ob in scene.objects:\n\t\tif ob.type == 'MESH':\n\t\t\tmod = ob.modifiers.new(\"Lattice\",'LATTICE')\n\t\t\tmod.object = lattice_ob\n\tscene.objects.link(lattice_ob)\n\tscene.objects.active = lattice_ob\n\tscene.update()\n\n\tlattice.interpolation_type_u = 'KEY_LINEAR'\n\tlattice.interpolation_type_v = 'KEY_CARDINAL'\n\tlattice.interpolation_type_w = 'KEY_BSPLINE'\n\tlattice.use_outside =False\n\tlattice.points_u =2\n\tlattice.points_v =2\n\tlattice.points_w =2\n\ts = 0.5\n\ts2 = float(argvs[6])\n\ts_z = float(argvs[7])\n\ts_y = float(argvs[8])\n\ts_x = float(argvs[9])\n\tpoints = [\n\t\t(-s,-s,-s),(s2+s_x,-s2+s_y,-s2+s_z),(-s,s,-s),(s2+s_x,s2+s_y,-s2+s_z),\n\t\t(-s,-s,s),(s2+s_x,-s2+s_y,s2+s_z),(-s,s,s),(s2+s_x,s2+s_y,s2+s_z)\n\t]\n\tbpy.context.scene.update()\n\tbpy.ops.object.mode_set(mode='EDIT')\n\tfor n,pt in enumerate(lattice.points):\n\t\tfor k in range(3):\n\t\t\tpt.co_deform[k] = points[n][k]\n\t\t\t#pass\n\t\"\"\"2cool\"\"\"\n\tlattice_2 = bpy.data.lattices.new(\"Lattice2\")\n\tlattice_ob_2 = bpy.data.objects.new(\"LatticeObj2\",lattice_2)\n\tlattice_ob_2.location = (0.55,0.5,0.5)\n\t#lattice_ob.show_x_ray = True\n\tfor ob in scene.objects:\n\t\tif ob.type == 'MESH':\n\t\t\tmod = ob.modifiers.new(\"Lattice2\",'LATTICE')\n\t\t\tmod.object = lattice_ob_2\n\tscene.objects.link(lattice_ob_2)\n\tscene.objects.active = lattice_ob_2\n\tscene.update()\n\tlattice_2.interpolation_type_u = 'KEY_LINEAR'\n\tlattice_2.interpolation_type_v = 'KEY_CARDINAL'\n\tlattice_2.interpolation_type_w = 'KEY_BSPLINE'\n\tlattice_2.use_outside =False\n\tlattice_2.points_u =2\n\tlattice_2.points_v =2\n\tlattice_2.points_w =2\n\tpoints = [\n\t\t(-s,-s,-s),(s2+s_x,-s2-s_y,-s2+s_z),(-s,s,-s),(s2+s_x,s2-s_y,-s2+s_z),\n\t\t(-s,-s,s),(s2+s_x,-s2-s_y,s2+s_z),(-s,s,s),(s2+s_x,s2-s_y,s2+s_z)\n\t]\n\tbpy.context.scene.update()\n\n\tfor n,pt in enumerate(lattice_2.points):\n\t\tfor k in range(3):\n\t\t\tpt.co_deform[k] = points[n][k]\n\t\t\t#pass\n\n\treturn lattice_ob\n\n\n\nif __name__ == \"__main__\":\n\targvs = sys.argv\n\tLOAD_FILE = os.path.join(os.getcwd(),'app/static/OBJfile/boobs_flat.obj')\n\tdelete_all()\n\tbpy.ops.import_scene.obj(filepath= LOAD_FILE)\n\tscene = bpy.context.scene\n\tlat = createLattice(scene,argvs)\n\t\n\tbpy.context.scene.objects.active = bpy.data.objects[\"Default\"]\n\tbpy.ops.object.mode_set(mode='EDIT')\n\t#bpy.context.scene.update()\n\t'''\t\n\tbpy.ops.mesh.mark_sharp()\n\tfor i in range(10):\n\t\tbpy.ops.mesh.vertices_smooth()\n\tbpy.ops.object.mode_set(mode='OBJECT')\n\t'''\n\tEXP_FILE = \"app/static/OBJfile/ex_boobs.obj\"\n\t#printOBJ()\n\tsavePath = os.path.abspath(os.path.dirname(__file__))\n\tbpy.path.relpath(savePath)\n\tbpy.ops.export_scene.obj(filepath=EXP_FILE)\n\t#bpy.ops.export_scene.obj(filepath=\"OKOK.obj\")\n\t#bpy.ops.wm.save_as_mainfile(filepath=\"OKOK.blend\",relative_remap=True)\n\t#add_cone()\n\t#add_cube_by_data()\n\t#bpy.data.objects[\"Default\"].data.vertices[0].co.x +=20.0\n\t#bpy.ops.object.editmode_toggles()\n\t#bpy.ops.object.mode_set(mode='EDIT')\n\t#bpy.ops.translate(value=(0,0,2),constraint_axis=(False,False,False),constraint_orientation='GLOBAL',mirror = False,proportional='DISABLED',proportional_edit_falloff='SMOOTH',proportional_size=1)\n\n" }, { "alpha_fraction": 0.6547085046768188, "alphanum_fraction": 0.6748878955841064, "avg_line_length": 23.77777862548828, "blob_id": "c81f68ce67bac10958bbf1985b631a8ff1030c3c", 
"content_id": "393b7883c2b011284af1d21e88b47286fc205942", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 892, "license_type": "no_license", "max_line_length": 107, "num_lines": 36, "path": "/app/utils/bb/push_parameter.cgi", "repo_name": "hermosa-circulo/hermosa_circulo", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os\nimport cgi\nimport cgitb\nimport sys\nimport codecs\nimport time\nimport random\n#print \"Location: http://ec2-54-148-249-100.us-west-2.compute.amazonaws.com/mrdoob-three.js/index.html\\n\\n\"\nprint \"Content-type: text/html\\n\\n\";\nprint \"<html>\\n<body>\\n\";\nprint \"<div style =\\\"width: 100%; font-size: 40px; font-weigth: bold; text-align: center;\\\">\\n\";\nprint \"CGI TEST\";\nprint \"\\n</div>\\n\";\nprint \"</body>\\n</html>\\n\";\n\nif os.environ['REQUEST_METHOD'] == 'POST':\n\tprint 'POST REQUEST'\ndata = cgi.FieldStorage()\nosize=data[\"wheel_radius\"].value\n#b =data[\"begining_point\"].value\nx = data[\"begin\"].value\ny = data[\"point_num\"].value\nz = data[\"breast_wide\"].value\n\nret = random.random()\nprint ret;\nfile = open('change.txt','w')\n#str_temp = ret+\"\\n\"\nfile.write(str(ret)+\"\\n\")\nfile.write(osize+\"\\n\")\nfile.write(x+\"\\n\")\nfile.write(y+\"\\n\")\nfile.write(z+\"\\n\")\nfile.close()\nprint ret;\n" }, { "alpha_fraction": 0.42842161655426025, "alphanum_fraction": 0.46801257133483887, "avg_line_length": 34.67307662963867, "blob_id": "7be923cd1cdb1ff443b1fecdcf25651284599935", "content_id": "934a2898d913631eb5214aae14201b707d96874e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3814, "license_type": "no_license", "max_line_length": 175, "num_lines": 104, "path": "/app/utils/Cylinder/makeobj.py", "repo_name": "hermosa-circulo/hermosa_circulo", "src_encoding": "UTF-8", "text": "import math\r\nimport Vertex\r\ndef VectorSubstruction(v1,v2):\r\n ans = [0 for i in range(len(v1))]\r\n for i in range(len(v1)):\r\n ans[i] = v1[i]-v2[i]\r\n return ans\r\ndef CrossProduct(a,b):\r\n ans = [0 for i in range(len(a))]\r\n ans[0] = (a[1]*b[2]-a[2]*b[1])\r\n ans[1] = (a[2]*b[0]-a[0]*b[2])\r\n ans[2] = (a[0]*b[1]-a[1]*b[0])\r\n return ans\r\n\r\ndef returnVN(v):\r\n vn =[[[0 for i in range(3)]for j in range(len(v[0]))]for k in range(len(v)-1)]\r\n for i in range(len(v)-1):\r\n for j in range(len(v[0])):\r\n if j == len(v[0])-1:\r\n a = VectorSubstruction(v[i][j],v[i+1][j])\r\n b = VectorSubstruction(v[i][j],v[i][0])\r\n vn[i][j] = CrossProduct(a,b)\r\n else:\r\n a = VectorSubstruction(v[i][j],v[i+1][j])\r\n b = VectorSubstruction(v[i][j],v[i][j+1])\r\n vn[i][j] = CrossProduct(a,b)\r\n return vn\r\n\r\ndef returnF(v,vn):\r\n f = [[\"\" for i in range(len(vn[0]))]for i in range(len(vn))]\r\n for i in range(len(v)-1):\r\n vnum0 = i*len(v[0])+1\r\n vnum1 = (i+1)*len(v[0])+1\r\n for j in range(len(v[0])):\r\n if j == len(v[0])-1:\r\n f[i][j] = \"f \"+str(vnum0)+\"//\"+str(vnum0+j)+\" \"+str(vnum1)+\"//\"+str(vnum0+j)+\" \"+str(vnum1+j)+\"//\"+str(vnum0+j)+\" \"+str(vnum0+j)+\"//\"+str(vnum0+j)+\"\\n\"\r\n else:\r\n f[i][j] = \"f \"+str(vnum0+j+1)+\"//\"+str(vnum0+j)+\" \"+str(vnum1+j+1)+\"//\"+str(vnum0+j)+\" \"+str(vnum1+j)+\"//\"+str(vnum0+j)+\" \"+str(vnum0+j)+\"//\"+str(vnum0+j)+\"\\n\"\r\n return f\r\n\r\ndef CapV(v,jud):\r\n if jud == 0:\r\n\tpoint = 0\r\n else:\r\n\tpoint = len(v) - 1\r\n\r\n a = VectorSubstruction(v[point][0],v[point][1])\r\n b = VectorSubstruction(v[point][0],v[point][2])\r\n vncap= 
CrossProduct(a,b)\r\n if not jud:\r\n for i in range(len(vncap)):\r\n vncap[i]=-vncap[i]\r\n return vncap\r\n\r\ndef CapF(v,vn,jud):\r\n if jud == 0:\r\n point = 1\r\n vnpoint = (len(vn))*len(vn[0])+1\r\n else:\r\n point = (len(v)-1)*len(v[0])+1\r\n vnpoint = (len(vn))*len(vn[0])+2\r\n fcap = \"\"\r\n for i in range(len(v[0])-2):\r\n fcap += \"f \"+str(point)+\"//\"+str(vnpoint)+\" \"+str(point+i+1)+\"//\"+str(vnpoint)+\" \"+str(point+i+2)+\"//\"+str(vnpoint)+(\"\\n\")\r\n return fcap\r\n\r\ndef make(wheel_radius,begining_point,closoid_number,point_num,breast_wide):\r\n #v = [[[0,0,0],[10,0,0],[10,10,0],[0,10,0]],[[0,0,10], [10,0,10],[10,10,10],[0,10,10]]]\r\n v = Vertex.makeV(wheel_radius,begining_point,closoid_number,point_num,breast_wide)\r\n objfile_str = \"\"\r\n for i in range(len(v)):\r\n for j in range(len(v[i])):\r\n objfile_str += \"v \"+str(v[i][j][0])+\" \"+str(v[i][j][1])+\" \"+str(v[i][j][2])+\"\\n\"\r\n for i in range(len(v)-1):\r\n for j in range(len(v[i])-1):\r\n objfile_str += \"f \"+str(len(v[i])*i+j+1)+\" \"+str(len(v[i])*i+j+2)+\" \"+str(len(v[i])*(i+1)+j+2)+\" \"+str(len(v[i])*(i+1)+j+1)+\"\\n\"\r\n return objfile_str\r\n \r\n '''\r\n center = [0,0,0]\r\n vn = returnVN(v)\r\n f = returnF(v,vn)\r\n vncap = CapV(v,1)\r\n fcap = CapF(v,vn,1)\r\n vncap_first = CapV(v,0)\r\n fcap_first = CapF(v,vn,0)\r\n\t#####make str #####################################\r\n objfile_str = \"\"\r\n objfile_str += \"g cube\\n\"\r\n for i in v:\r\n for j in i:\r\n objfile_str += \"v \"+str(j[0])+\" \"+str(j[1])+\" \"+str(j[2])+\"\\n\"\r\n for i in vn:\r\n for j in i:\r\n objfile_str += \"vn \"+str(j[0])+\" \"+str(j[1])+\" \"+str(j[2])+\"\\n\"\r\n objfile_str += \"vn \"+str(vncap[0])+\" \"+str(vncap[1])+\" \"+str(vncap[2])+\"\\n\"\r\n objfile_str += \"vn \"+str(vncap_first[0])+\" \"+str(vncap_first[1])+\" \"+str(vncap_first[2])+\"\\n\"\r\n for i in f:\r\n \tfor j in i:\r\n\t objfile_str += str(j)\r\n objfile_str += fcap\r\n objfile_str += fcap_first\r\n return objfile_str\r\n '''\r\n" }, { "alpha_fraction": 0.574999988079071, "alphanum_fraction": 0.612500011920929, "avg_line_length": 10.972972869873047, "blob_id": "6c7b48a48ba2044417fde4b2481506f3bd99703d", "content_id": "780de1cc63211f57f2efc5cd40d964e4fd057de6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 496, "license_type": "no_license", "max_line_length": 89, "num_lines": 37, "path": "/README.md", "repo_name": "hermosa-circulo/hermosa_circulo", "src_encoding": "UTF-8", "text": "# hermosa_circulo\r\n\r\n\r\n## Description\r\n\r\n- 3DモデルをIGAで最適化\r\n\r\n## Requirement\r\n\r\n- python 2.7\r\n- [Django](https://docs.djangoproject.com/ja/1.11/)\r\n\r\n## Install\r\n\r\n- TBD\r\n\r\n## Usage\r\n\r\n```\r\n$ docker-compose up -d\r\n```\r\n## Document\r\n\r\n- TBD\r\n\r\n## Demo\r\n\r\n- [sample](http://ec2-52-198-214-79.ap-northeast-1.compute.amazonaws.com/boobs_designer/)\r\n\r\n## Author\r\n\r\n- [m-masataka](https://github.com/m-masataka)\r\n- [bandoshintaro](https://github.com/bandoshintaro)\r\n\r\n## Support\r\n\r\n- TBD\r\n" }, { "alpha_fraction": 0.5646258592605591, "alphanum_fraction": 0.5646258592605591, "avg_line_length": 35, "blob_id": "166aa1fe7cdfb5bb521a836ea7bcc822f23fea7e", "content_id": "790203cc1a2c9047be137cc4169472dd1f690bcf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 294, "license_type": "no_license", "max_line_length": 57, "num_lines": 8, "path": "/static/javascript/portal.js", "repo_name": 
"hermosa-circulo/hermosa_circulo", "src_encoding": "UTF-8", "text": "var app = angular.module('portal',[]);\r\napp.controller('portal_controller',fuction(){\r\n this.links =[\r\n {name:'nasne', url:'http://nasne.buttobi.com'},\r\n {name:'router', url:'http://router.buttibi.com'},\r\n {name:'home', url:'https://portal.buttobi.com'}\r\n ];\r\n});" }, { "alpha_fraction": 0.32946860790252686, "alphanum_fraction": 0.40193235874176025, "avg_line_length": 22.069766998291016, "blob_id": "d7ac752ecb9c1123adad4272b073bd08b4116461", "content_id": "bb4f1bbb97a9fff090762f50eb084827fc5b2af7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1035, "license_type": "no_license", "max_line_length": 62, "num_lines": 43, "path": "/app/utils/Cylinder/Closoid.py", "repo_name": "hermosa-circulo/hermosa_circulo", "src_encoding": "UTF-8", "text": "import sys\r\nimport math\r\n\r\ndef Kai(n):\r\n if n == 0:\r\n return 1\r\n else:\r\n m = n\r\n n -= 1\r\n return m * Kai(n)\r\n\r\ndef Closoid(tau):\r\n A = k = 1\r\n x = 0\r\n x_1 = 2\r\n F_k_x = F_k_y = 0\r\n temp_f = 2\r\n while(math.fabs(temp_f-F_k_x) > 0.00001):\r\n temp_f = F_k_x\r\n f_1 = math.pow(-1,k-1)\r\n f_2 = 1.0/(Kai(2*(k-1))*(4*k-3))\r\n f_3 = math.pow(tau,2*(k-1))\r\n F_k_x += f_1*f_2*f_3\r\n k += 1\r\n #print \"%s %s %s\" %(f_1,f_2,f_3)\r\n #print F_k_x\r\n temp_f = 2\r\n k = 1\r\n while(math.fabs(temp_f-F_k_y) > 0.00001):\r\n temp_f = F_k_y\r\n f_1 = math.pow(-1,k-1)\r\n f_2 = 1.0/(Kai(2*k-1)*(4*k-1))\r\n f_3 = math.pow(tau,2*(k-1))\r\n F_k_y += f_1*f_2*f_3\r\n k = k+1\r\n #print \"%s %s %s\" %(f_1,f_2,f_3)\r\n #print F_k_y\r\n n =[F_k_x*A*math.sqrt(2*tau),F_k_y*A*tau*math.sqrt(2*tau)]\r\n return n\r\ndef Formula(x):\r\n point = Closoid((math.pi/60)*x)\r\n n = [point[0]*100,point[1]*100]\r\n return n\r\n" }, { "alpha_fraction": 0.3519619405269623, "alphanum_fraction": 0.4200356602668762, "avg_line_length": 42.128204345703125, "blob_id": "2b7fdefcaf3136cfcc831c3379e0ed6f4d2166b1", "content_id": "73187c91156989e21fbe613b3080103acf73bf40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3364, "license_type": "no_license", "max_line_length": 114, "num_lines": 78, "path": "/app/utils/lattice/lattice.py", "repo_name": "hermosa-circulo/hermosa_circulo", "src_encoding": "UTF-8", "text": "import math\ndef make_sphere():\n array_v = []\n theata = 0\n radius = 10\n z = 0\n i = 0\n while z <= radius:\n r = math.sqrt(radius*radius - z*z)\n count = 0\n array_v.append([])\n while True:\n x = r * math.cos(math.pi*10*count/360)\n y = r * math.sin(math.pi*10*count/360)\n array_v[i].append([x,y,z])\n if 720 == 10*count:\n break\n count = count + 1\n z = z + 0.2\n i = i + 1\n return array_v\n\ndef lattice(array_v):\n point = [[10,10,10],[10,-10,10],[-10,-10,10],[-10,10,10],\n [10,10,0],[10,-10,0],[-10,-10,0],[-10,10,0]]\n point_next = [[10,10,10],[10,-10,10],[-10,-10,10],[-10,10,10],\n [10,10,0],[10,-10,0],[-10,-10,0],[-10,10,0]]\n edge = [[3,1,4],\n [2,0,5],\n [1,3,6],\n [0,2,7],\n [7,5,0],\n [6,4,1],\n [5,7,2],\n [4,6,3]]\n for i in range(len(array_v)):\n for j in range(len(array_v[i])):\n for k in range(len(point)):\n #### x move #####\n x_per = (point_next[k][0]-point[edge[k][0]][0])/(point[k][0]-point[edge[k][0]][0])\n y_per = (array_v[i][j][1]-point[edge[k][1]][1])/(point[k][1]-point[edge[k][1]][1])\n z_per = (array_v[i][j][2]-point[edge[k][2]][2])/(point[k][2]-point[edge[k][2]][2])\n x_next = point[edge[k][0]][0]+(array_v[i][j][0] - 
point[edge[k][0]][0])*(1-(1-x_per)*y_per*z_per)\n array_v[i][j][0] = x_next\n #### y move #####\n y_per = (point_next[k][1]-point[edge[k][1]][1])/(point[k][1]-point[edge[k][1]][1])\n x_per = (array_v[i][j][0]-point[edge[k][0]][0])/(point[k][0]-point[edge[k][0]][0])\n z_per = (array_v[i][j][2]-point[edge[k][2]][2])/(point[k][2]-point[edge[k][2]][2])\n y_next = point[edge[k][1]][1]+(array_v[i][j][1] - point[edge[k][1]][1])*(1-(1-y_per)*x_per*z_per)\n array_v[i][j][1] = y_next\n #### z move #####\n z_per = (point_next[k][2]-point[edge[k][2]][2])/(point[k][2]-point[edge[k][2]][2])\n x_per = (array_v[i][j][0]-point[edge[k][0]][0])/(point[k][0]-point[edge[k][0]][0])\n y_per = (array_v[i][j][1]-point[edge[k][1]][1])/(point[k][1]-point[edge[k][1]][1])\n z_next = point[edge[k][2]][2]+(array_v[i][j][2] - point[edge[k][2]][2])*(1-(1-z_per)*x_per*y_per)\n array_v[i][j][2] = z_next\n return array_v\n\ndef Lattice_obj(): \n array_v = make_sphere()\n array_v = lattice(array_v)\n str_obj = \"\"\n ###### Vertices ######\n for i in range(len(array_v)):\n for j in range(len(array_v[i])):\n str_obj += \"v \"+str(array_v[i][j][0])+\" \"+str(array_v[i][j][1])+\" \"+str(array_v[i][j][2])+\"\\n\"\n #### Face #####\n for i in range(len(array_v)-1):\n for j in range(len(array_v[i])-1):\n str_obj += \"f \"\n str_obj += str(len(array_v[i])*i+j+1)+\" \"\n str_obj += str(len(array_v[i])*i+j+2)+\" \"\n str_obj += str(len(array_v[i])*(i+1)+j+2)+\"\\n\"\n str_obj += \"f \"\n str_obj += str(len(array_v[i])*i+j+1)+\" \"\n str_obj += str(len(array_v[i])*(i+1)+j+2)+\" \"\n str_obj += str(len(array_v[i])*(i+1)+j+1)+\"\\n\"\n return str_obj\n" }, { "alpha_fraction": 0.6887686252593994, "alphanum_fraction": 0.6941813230514526, "avg_line_length": 42.47058868408203, "blob_id": "0c58046d7292393282ccbaf98217fe512a88756b", "content_id": "b05ba1c61fc5e025c716e6acc3b3c0c3a245516a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 739, "license_type": "no_license", "max_line_length": 95, "num_lines": 17, "path": "/app/urls.py", "repo_name": "hermosa-circulo/hermosa_circulo", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*- vim: set et ts=4 sw=4 :\n\nfrom django.conf.urls import include, url, patterns\nfrom django.contrib import admin\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom app.views import *\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$',MainView.as_view(), name='index'),\n url(r'^boobs_blender/$',Boobs_BlenderView.as_view(), name='boobs_blender'),\n url(r'^lattice/$',latticeView.as_view(),name='lattice'), \n url(r'^update/$',views.update_3D_object, name='update'),\n url(r'^update_lattice_object/$',views.update_lattice_object, name='update_lattice_object'),\n url(r'^makeBlender/$',views.executeBlender, name='makeBlender'),\n url(r'^execIGA/$',views.executeIGA, name='execIGA'),\n]\n" }, { "alpha_fraction": 0.6091790199279785, "alphanum_fraction": 0.626282811164856, "avg_line_length": 31.183486938476562, "blob_id": "0a4bdf4916f02b39d2bc89d7110b01c9d49c1d6f", "content_id": "c3536f27129bd0352673b42b37a68cdaac8594d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3636, "license_type": "no_license", "max_line_length": 141, "num_lines": 109, "path": "/app/views.py", "repo_name": "hermosa-circulo/hermosa_circulo", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*- vim: set et ts=4 sw=4 :\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.views.generic import TemplateView\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nimport os\nimport commands\nimport random\nfrom app.utils.Cylinder import makeobj\nfrom app.utils.lattice import lattice\nfrom django.core.files import File\n#from app.utils.bb import Lattice\n\nclass MainView(TemplateView):\n '''\n タイトルページ\n '''\n template_name = \"index.html\"\n def index(request):\n pass\n\nclass Boobs_BlenderView(TemplateView):\n '''\n Blenderのページ\n '''\n template_name = \"boobs_blender.html\"\n def boobs_blender(request):\n pass\n\nclass latticeView(TemplateView):\n '''\n latticeのページ\n '''\n template_name = \"lattice.html\"\n def IGA(request):\n pass\n\ndef update_lattice_object(request):\n '''\n latticeのモデルを更新\n '''\n to_x = float(request.POST.get(\"x\",0.0))*10\n to_y = float(request.POST.get(\"y\",0.0))*10\n to_z = float(request.POST.get(\"z\",0.0))*10\n ret = lattice.Lattice_obj(to_x, to_y, to_z)\n module_dir = os.path.dirname(__file__)\n file_path = os.path.join(module_dir, '../static/OBJfile/lattice.obj')\n file = open(file_path,'w')\n file.write(ret)\n return HttpResponseRedirect(reverse('lattice'))\n\ndef update_3D_object(request):\n '''\n 3Dモデルの更新をするページ\n '''\n wheel_radius = int(request.POST.get(\"wheel_radius\",0))\n begining_point = int(request.POST.get(\"begining_point\",0))\n begin = 100 - int(request.POST.get(\"begin\",0))\n point_num = int(request.POST.get(\"point_num\",0))\n breast_wide = 1.0 - float(request.POST.get(\"breast_wide\",0.0))\n ret = makeobj.make(wheel_radius,begining_point,begin,point_num,breast_wide)\n module_dir = os.path.dirname(__file__)\n file_path = os.path.join(module_dir, '../static/OBJfile/model2.obj')\n file = open(file_path,'w')\n file.write(ret)\n file.close()\n return HttpResponseRedirect(reverse('index'))\n #return HttpResponse(ret)\n\ndef executeBlender(request):\n '''\n Blenderによる3Dモデルの更新をするページ\n '''\n wheel_radius = request.POST.get(\"wheel_radius\",0)\n begin = request.POST.get(\"begin\",0)\n point_num = request.POST.get(\"point_num\",0)\n breast_wide = request.POST.get(\"breast_wide\",0.0)\n\n currentdir = str(os.getcwd())\n strcommand = \"blender --background --python \"+currentdir+\"/app/utils/bb/Lattice.py \"+wheel_radius+\" \"+begin+\" \"+point_num+\" \"+breast_wide\n check = commands.getoutput(strcommand)\n\n return 
HttpResponseRedirect(reverse('boobs_blender'))\n\ndef executeIGA(request):\n '''\n IGAによる個体の最適化をするページ\n '''\n file_num = 3\n para_num = 5\n parameter=[[0 for i in range(para_num)] for j in range(file_num)]\n for i in range(len(parameter)):\n for j in range(len(parameter[0])):\n if j == 1:\n parameter[i][j] = 10\n elif j == 3:\n parameter[i][j] = 30\n elif j == 4:\n parameter[i][j] = random.uniform(0.15,0.6)\n else:\n parameter[i][j] = random.randint(30,70)\n\n for i in range(len(parameter)):\n ret = makeobj.make(parameter[i][0],parameter[i][1],parameter[i][2],parameter[i][3],parameter[i][4])\n file = open(os.path.join(os.getcwd(),'app/static/OBJfile/iga/iga'+str(i)+'.obj'),'w')\n file.write(ret)\n file.close()\n return HttpResponseRedirect(reverse('IGA'))\n" }, { "alpha_fraction": 0.6190476417541504, "alphanum_fraction": 0.6476190686225891, "avg_line_length": 25.25, "blob_id": "f1d76ef4b80a3377d97c8f657e28de767ae5ca1d", "content_id": "dfcee083fa662b5e15a492ca21e0787513c220a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 105, "license_type": "no_license", "max_line_length": 47, "num_lines": 4, "path": "/app/models.py", "repo_name": "hermosa-circulo/hermosa_circulo", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*- vim: set et ts=4 sw=4 :\nfrom django.db import models\n\n# Create your models here.\n" }, { "alpha_fraction": 0.5717054009437561, "alphanum_fraction": 0.6072351336479187, "avg_line_length": 21.405797958374023, "blob_id": "d8a72e5e5271d80cde4867b9a33cda1d71fb29ff", "content_id": "d963220fa7f04d8c0d2d9ff4a4263f0ba58ebb8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1548, "license_type": "no_license", "max_line_length": 88, "num_lines": 69, "path": "/static/javascript/ui-slider.js", "repo_name": "hermosa-circulo/hermosa_circulo", "src_encoding": "UTF-8", "text": "\njQuery( function(){\n\tjQuery( '#jquery-ui-slider-1').slider({\n\t\trange: 'min',\n\t\tvalue: 45,\n\t\tmin: 0,\n\t\tmax: 100,\n\t\tstep: 1,\n\t\tslide: function(event,ui){\n\t\t\tjQuery('#jquery-ui-slider-value-1').val(ui.value);\n\t\t}\n\t});\n\tjQuery('#jquery-ui-slider-value-1').val(jQuery('#jquery-ui-slider-1').slider('value'));\n});\n\njQuery( function(){\n\tjQuery( '#jquery-ui-slider-2').slider({\n\t\trange: 'min',\n\t\tvalue: 10,\n\t\tmin: 0,\n\t\tmax: 100,\n\t\tstep: 1,\n\t\tslide: function(event,ui){\n\t\t\tjQuery('#jquery-ui-slider-value-2').val(ui.value);\n\t\t}\n\t});\n\tjQuery('#jquery-ui-slider-value-2').val(jQuery('#jquery-ui-slider-2').slider('value'));\n});\n\njQuery( function(){\n\tjQuery( '#jquery-ui-slider-3').slider({\n\t\trange: 'min',\n\t\tvalue: 60,\n\t\tmin: 0,\n\t\tmax: 100,\n\t\tstep: 1,\n\t\tslide: function(event,ui){\n\t\t\tjQuery('#jquery-ui-slider-value-3').val(ui.value);\n\t\t}\n\t});\n\tjQuery('#jquery-ui-slider-value-3').val(jQuery('#jquery-ui-slider-3').slider('value'));\n});\n\njQuery( function(){\n\tjQuery( '#jquery-ui-slider-4').slider({\n\t\trange: 'min',\n\t\tvalue: 30,\n\t\tmin: 20,\n\t\tmax: 60,\n\t\tstep: 1,\n\t\tslide: function(event,ui){\n\t\t\tjQuery('#jquery-ui-slider-value-4').val(ui.value);\n\t\t}\n\t});\n\tjQuery('#jquery-ui-slider-value-4').val(jQuery('#jquery-ui-slider-4').slider('value'));\n});\n\njQuery( function(){\n\tjQuery( '#jquery-ui-slider-5').slider({\n\t\trange: 'min',\n\t\tvalue: 0.5,\n\t\tmin: 0,\n\t\tmax: 1,\n\t\tstep: 0.01,\n\t\tslide: 
function(event,ui){\n\t\t\tjQuery('#jquery-ui-slider-value-5').val(ui.value);\n\t\t}\n\t});\n\tjQuery('#jquery-ui-slider-value-5').val(jQuery('#jquery-ui-slider-5').slider('value'));\n});\n\n" } ]
13
500swapnil/tictactoe-bot
https://github.com/500swapnil/tictactoe-bot
018ffb82966ef2d85d9a9b88a2553df3bcf40a20
1a31e423eb11f03c9a87f5c1fa6c9d4444d5b0ce
99df6415eb275af37623d935e517ec0b2a36dd54
refs/heads/master
2021-08-31T17:36:31.032804
2017-12-22T08:13:05
2017-12-22T08:13:05
115,091,533
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.3832453787326813, "alphanum_fraction": 0.42216357588768005, "avg_line_length": 27.60377311706543, "blob_id": "7f8d4d181b2b8c351ed1385bcccd62f7afd1225e", "content_id": "d8e538d3c53cf1aa79a265434f51c0e5ac4855c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4548, "license_type": "no_license", "max_line_length": 174, "num_lines": 159, "path": "/tictactoe.py", "repo_name": "500swapnil/tictactoe-bot", "src_encoding": "UTF-8", "text": "import random\nimport os\nimport time\n\nos.system(\"clear\")\n\nboard = [[' ',' ',' '],\n [' ',' ',' '],\n [' ',' ',' ']]\nturn = 1\n\ndef printBoard(board):\n print('-------------')\n for i in range(3):\n print('|',board[i][0],'|',board[i][1],'|',board[i][2],'|')\n print('-------------')\n\ndef play(board,turn,auto=False):\n if auto:\n print(\"My turn to play\",\"X\" if turn == 1 else \"O\")\n time.sleep(1)\n (a,b) = autoplay(board,turn)\n a -= 1\n b -= 1\n else:\n if turn == 1:\n inp = input(\"Choose the row and column to play X in: \").split()\n else:\n inp = input(\"Choose the row and column to play O in: \").split()\n if len(inp) != 2:\n print('Invalid move')\n play(board,turn)\n return\n\n if inp[0].isdigit() == False or inp[1].isdigit() == False:\n print('Invalid move')\n play(board,turn)\n return \n\n if int(inp[0])>3 or int(inp[1])>3 or int(inp[1])<1 or int(inp[0])<1:\n print('Invalid move')\n play(board,turn)\n return\n\n a = int(inp[0]) - 1\n b = int(inp[1]) - 1\n if board[a][b] != ' ':\n print('Invalid move')\n play(board,turn)\n return\n if turn == 1:\n board[a][b] = 'X'\n else:\n board[a][b] = 'O'\n\ndef check(b):\n if (b[0][0] == b[0][1] and b[0][1] == b[0][2]) or (b[0][0] == b[1][0] and b[1][0] == b[2][0]) or (b[0][0] == b[1][1] and b[1][1] == b[2][2]):\n if b[0][0] == 'X':\n return 1\n elif b[0][0] == 'O':\n return 0\n if (b[1][1] == b[1][0] and b[1][1] == b[1][2]) or (b[1][1] == b[0][1] and b[1][1] == b[2][1]) or (b[1][1] == b[0][2] and b[1][1] == b[2][0]):\n if b[1][1] == 'X':\n return 1\n elif b[1][1] == 'O':\n return 0\n if (b[2][2] == b[2][1] and b[2][1] == b[2][0]) or (b[2][2] == b[1][2] and b[2][2] == b[0][2]):\n if b[2][2] == 'X':\n return 1\n elif b[2][2] == 'O':\n return 0\n return 2\n\ndef autoplay(board,turn):\n bestVal = -100\n bestMove = [0,2]\n for i in range(3):\n for j in range(3):\n if board[i][j] == ' ':\n board[i][j] = 'X' if turn==1 else 'O'\n move = minimax(board,False,0,turn)\n if bestVal < move:\n bestMove = [i,j]\n bestVal = move\n board[i][j] = ' '\n bestMove[0] += 1\n bestMove[1] += 1\n return bestMove\n \ndef minimax(board,ismax,depth,turn):\n if check(board) != 2:\n if check(board) == turn:\n return 10 - depth\n else:\n return -10 + depth\n \n if ismax:\n best = -100\n for i in range(3):\n for j in range(3):\n if board[i][j] == ' ':\n board[i][j] = 'X' if turn==1 else 'O'\n best = max(best,minimax(board,False,depth+1,turn))\n board[i][j] = ' '\n if best == -100:\n return 0\n return best\n \n else:\n best = 100\n for i in range(3):\n for j in range(3):\n if board[i][j] == ' ':\n board[i][j] = 'X' if turn==0 else 'O'\n best = min(best,minimax(board,True,depth+1,turn))\n board[i][j] = ' '\n if best == 100:\n return 0\n return best\n\nprint(\"Welcome to TicTacToe\\nEnter your name or 'AI' as the player name to make it a bot!\\n\\nHow to play?\\n\\nEnter the row and column of where you want to place your X or O\"+\n\"\\n\\nExample\\n\"+\n\"-------------\\n| | X | |\\n\"+\n\"-------------\\n| | | |\\n\"+\n\"-------------\\n| | | 
|\\n\"+\n\"-------------\\n\\n\"+\n\"1 2\\nThis move is played in the first row and second column\\n\\nPress Ctrl+C anytime to quit\\n\\n\")\np1 = input(\"Enter player 1 name: \")\np2 = input(\"Enter player 2 name: \")\n\nif p1 == \"\":\n p1 = \"Player1\"\nif p2 == \"\":\n p2 = \"Player2\"\n\nc = 0\n\nwhile True:\n os.system(\"clear\")\n printBoard(board)\n if check(board) == 1:\n print(p1,\"wins\")\n break\n elif check(board) == 0:\n print(p2,\"wins\")\n break\n if c == 9:\n print(\"Draw\")\n break\n if (turn == 0 and p2 == \"AI\") or (turn == 1 and p1 == \"AI\"):\n play(board,turn,auto=True)\n else:\n if turn == 0:\n print(p2, \"It's your turn!\")\n else:\n print(p1, \"It's your turn!\")\n play(board,turn)\n c+=1\n turn = (turn+1)%2\n" }, { "alpha_fraction": 0.747474730014801, "alphanum_fraction": 0.7643097639083862, "avg_line_length": 41.57143020629883, "blob_id": "fc68fac8b617ff086aac85e484ecd77ccf69f28f", "content_id": "b3a3227db691139adb3696f350c3b79e5f9647fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 297, "license_type": "no_license", "max_line_length": 233, "num_lines": 7, "path": "/README.md", "repo_name": "500swapnil/tictactoe-bot", "src_encoding": "UTF-8", "text": "# TicTacToe Bot\nA python implementation of the classic tictactoe game along with the option of playing against a bot. The bot is implemented using the minimax algorithm. This game can be played by 1 player and 1 AI bot, 2 players, or even 2 AI bots.\n\n## How to run\n```bash\npython3 tictactoe.py\n```" } ]
2
emeraldant/robo-survival
https://github.com/emeraldant/robo-survival
43d6e21ac1c2164e22e95effcae7c17759e55f1d
af7d5091f2db1220b9d79e3b286102d527705cd1
6ba41b0f92e1e44cdda40814834671f8ab11b7e3
refs/heads/master
2020-04-04T16:48:14.487076
2018-11-04T15:06:03
2018-11-04T15:06:03
156,093,378
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5225740671157837, "alphanum_fraction": 0.5673609375953674, "avg_line_length": 30.581748962402344, "blob_id": "92258a773dc7a057e040037a317a818d2d644b23", "content_id": "cc1a75bbe16c0de893c20553289da2c41f45973e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16612, "license_type": "no_license", "max_line_length": 116, "num_lines": 526, "path": "/Robo Survival/Robo Survival.py", "repo_name": "emeraldant/robo-survival", "src_encoding": "UTF-8", "text": "import pygame\nfrom pygame.locals import *\nimport time\nimport sys\nimport pyganim\nimport random\nimport math\n\npygame.init()\npygame.font.init()\nwindowSurface = pygame.display.set_mode((512,512), 0, 32)\n#pygame.image.load(\"facingLeftRobot.png\")\n\nclass Enemy:\n def __init__(self, name, health_max, x, y, attack, type):\n self.name = name\n self.health_max = health_max\n self.x = x\n self.y = y\n self.attack = attack\n self.type = type #projectile/melee\n\n\nclass Player():\n def __init__(self, health_max, current_health):\n self.health_max = health_max\n self.current_health = current_health\n\nclass Room:\n def __init__(self, x, y, textures):\n self.x = x\n self.y = y\n self.textures = textures #modern/old/etc\n\nclass Damage:\n def __init__(self, attack_power, range, type):\n self.attack_power = attack_power\n self.range = range\n self.type = type\n\nhappened = False\nheliMob = Enemy(name=\"Heli\", health_max=10, attack=2, type=\"projectile\", x=128, y=70)\nplayer = Player(health_max=20, current_health=20)\nstartRoom = Room(x=256, y=256, textures=\"old\")\n\n\n\n\n# Drawing player health bar\ndef draw_health_bar(health):\n\n if health > 15:\n health_color = GREEN\n elif health > 10:\n health_color = (255,255,0)\n else:\n health_color = (255,0,0)\n pygame.draw.rect(windowSurface, health_color, (5, 10, health * 2.5, 10))\n\n# Drawing individual health bars for enemy bots\n\n# def enemy_health_bar(health):\n#\n# if health > 15:\n# health_color = GREEN\n# elif health > 10:\n# health_color = (255,255,0)\n# else:\n# health_color = (255,0,0)\n#\n# healthbar1 = pygame.draw.rect(windowSurface, health_color, (eBox1.x, eBox1.y - 10, health * 2, 6))\n# healthbar2 = pygame.draw.rect(windowSurface, health_color, (eBox2.x, eBox2.y - 10, health * 2, 6))\n#\n# return healthbar1, healthbar2\n\ndef boss_health_bar(health):\n\n if health > 15:\n health_color = GREEN\n elif health > 10:\n health_color = (255,255,0)\n else:\n health_color = (255,0,0)\n pygame.draw.rect(windowSurface, health_color, (130, 100, health * 15, 10))\n\n\n\nBLACK = (0,0,0)\nGREEN = (0,255,0)\nWHITE = (0,0,0)\nPLAYERSPEED = 4\n\nbackground = pygame.image.load(\"NewBackground.png\")\nplayerBot = pygame.image.load(\"facingLeftRobot.png\").convert_alpha()\ntransColor = playerBot.get_at((20,20))\nplayerBot.set_colorkey(transColor)\nbotRect = pygame.Rect(248,248,32,32)\n#background.blit(Robot,playerBot)\nplayerJab = pygame.Rect(248,248,48,32)\n\nenemy1 = pygame.image.load(\"EnemyPosition1.png\").convert_alpha()\neBox1 = pygame.Rect(120,376,32,32)\ne1Life = \"alive\"\n\nenemy2 = pygame.image.load(\"EnemyPosition1.png\").convert_alpha()\neBox2 = pygame.Rect(376,376,32,32)\ne2Life = \"alive\"\n\nenemy3 = pygame.image.load(\"EnemyPosition1.png\").convert_alpha()\neBox3 = pygame.Rect(240,376,32,32)\ne3Life = \"alive\"\n\nenemy4 = pygame.image.load(\"EnemyPosition1.png\").convert_alpha()\neBox4 = pygame.Rect(192,136,32,32)\ne4Life = \"alive\"\n\nenemy5 = 
pygame.image.load(\"EnemyPosition1.png\").convert_alpha()\neBox5 = pygame.Rect(320,136,32,32)\ne5Life = \"alive\"\n\nboss = pygame.image.load(\"SpiderBoss.png\").convert_alpha()\nbBox = pygame.Rect(224,224,64,64)\nbossLife = \"alive\"\n\nfireballListX = []\nfireballListY = []\n\ncurrent_health = 70\nhealth_max = 20\nx_speed = 10\ny_speed = 10\nboss_health = 20\nmoveLeft = False\nmoveRight = False\nmoveUp = False\nmoveDown = False\nattacking = False\ndirection = \"r\"\ndamage = 1\n\nmainClock = pygame.time.Clock()\n\n\n\nwindowSurface = pygame.display.set_mode((512,512), 0, 32)\npygame.display.set_caption(\"Dungeon Game\")\nwindowSurface.fill(BLACK)\npygame.display.update()\n\nleftAttackAnim = pyganim.PygAnimation([('facingLeftRobot.png',0.5),\n ('leftAttack.png',0.5)])\nrightAttackAnim = pyganim.PygAnimation([('facingRightRobot.png',0.5),\n ('rightAttack.png',0.5)])\ndownAttackAnim = pyganim.PygAnimation([('facingDownRobot.png',0.5),\n ('downAttack.png',0.5)])\nupAttackAnim = pyganim.PygAnimation([('facingUpRobot.png',0.5),\n ('upAttack.png',0.5)])\nheliAnimLeft = pyganim.PygAnimation([('EnemyPosition1.png',0.5),\n ('EnemyPosition2.png',0.5)])\nheliAnimRight = pyganim.PygAnimation([('EnemyPosition1f.png',0.5),\n ('EnemyPosition2f.png',0.5)])\n\nleftAttackAnim.play()\nrightAttackAnim.play()\ndownAttackAnim.play()\nupAttackAnim.play()\nheliAnimLeft.play()\nheliAnimRight.play()\ntotal_score = 0\n\nagainDisplay = pygame.font.Font(\"freesansbold.ttf\",32)\nagainSurface = againDisplay.render(\"Retry\",True,WHITE,BLACK)\nagainRect = againSurface.get_rect()\nagainRect.center = (256,384)\n\nfireball = pygame.image.load(\"fireball.png\").convert_alpha()\n\nfireballListX = []\nfireballListY = []\nfireballSlopeX = []\nfireballSlopeY = []\nplaying = True\nwhile playing:\n healthText = pygame.font.SysFont('Comic Sans MS', 20)\n displayText = healthText.render(\"HP: \" + str(current_health) , False, (255,255,255))\n displayScore = pygame.font.SysFont('Comic Sans MS', 20)\n currentScore = displayScore.render(\"Score: \" + str(total_score) , False, (255,255,255))\n windowSurface.fill(BLACK)\n windowSurface.blit(background,[0,0])\n windowSurface.blit(playerBot, botRect)\n windowSurface.blit(displayText,[220,11])\n windowSurface.blit(currentScore,[410,11])\n tempHitBox = pygame.Rect(-5,-5,1,1)\n\n for event in pygame.event.get():\n if event.type == QUIT:\n playing = False\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_LEFT or event.key == ord(\"a\"):\n moveLeft = True\n direction = \"l\"\n if event.key == K_RIGHT or event.key == ord(\"d\"):\n moveRight = True\n direction = \"r\"\n if event.key == K_UP or event.key == ord(\"w\"):\n moveUp = True\n direction = \"u\"\n if event.key == K_DOWN or event.key == ord(\"s\"):\n moveDown = True\n direction = \"d\"\n if event.key == K_SPACE:\n if direction == \"l\":\n windowSurface.blit(background,[0,0])\n leftAttackAnim.blit(windowSurface,(botRect.x-16,botRect.y))\n tempHitBox = pygame.Rect(botRect.x-16,botRect.y,5,botRect.height)\n elif direction == \"r\":\n windowSurface.blit(background,[0,0])\n rightAttackAnim.blit(windowSurface,(botRect.x,botRect.y))\n tempHitBox = pygame.Rect(botRect.x+16,botRect.y,5,botRect.height)\n elif direction == \"u\":\n windowSurface.blit(background,[0,0])\n upAttackAnim.blit(windowSurface,(botRect.x,botRect.y-8))\n tempHitBox = pygame.Rect(botRect.x,botRect.y-16,botRect.width,5)\n elif direction == \"d\":\n windowSurface.blit(background,[0,0])\n downAttackAnim.blit(windowSurface,(botRect.x,botRect.y))\n 
tempHitBox = pygame.Rect(botRect.x,botRect.y+16,botRect.width,5)\n if event.type == KEYUP:\n if event.key == K_LEFT or event.key == ord(\"a\"):\n moveLeft = False\n if event.key == K_RIGHT or event.key == ord(\"d\"):\n moveRight = False\n if event.key == K_UP or event.key == ord(\"w\"):\n moveUp = False\n if event.key == K_DOWN or event.key == ord(\"s\"):\n moveDown = False\n if event.type == MOUSEBUTTONUP:\n mousePos = pygame.mouse.get_pos()\n if againRect.collidepoint(mousePos):\n fireballListX = []\n fireballListY = []\n fireballSlopeX = []\n fireballSlopeY = []\n total_score = 0\n current_health = 70\n health_max = 20\n x_speed = 10\n y_speed = 10\n boss_health = 20\n moveLeft = False\n moveRight = False\n moveUp = False\n moveDown = False\n attacking = False\n direction = \"r\"\n damage = 1\n playing = True\n\n\n\n if (moveLeft == True) and botRect.left >= 0:\n botRect.x -= PLAYERSPEED\n elif (moveRight == True) and botRect.right <= 512:\n botRect.x += PLAYERSPEED\n\n if (moveUp == True) and botRect.top >= 40:\n botRect.y -= PLAYERSPEED\n elif (moveDown == True) and botRect.bottom <= 512:\n botRect.y += PLAYERSPEED\n\n if direction == \"l\":\n playerBot = pygame.image.load(\"facingLeftRobot.png\").convert_alpha()\n elif direction == \"r\":\n playerBot = pygame.image.load(\"facingRightRobot.png\").convert_alpha()\n elif direction == \"u\":\n playerBot = pygame.image.load(\"facingUpRobot.png\").convert_alpha()\n elif direction == \"d\":\n playerBot = pygame.image.load(\"facingDownRobot.png\").convert_alpha()\n\n def shots(eBox):\n possibleShots = [(eBox.x,eBox.y-16),\n (eBox.x+(16/math.sqrt(2)),eBox.y-(16/math.sqrt(2))),\n (eBox.x+16,eBox.y),\n (eBox.x+(16/math.sqrt(2)),eBox.y+(16/math.sqrt(2))),\n (eBox.x,eBox.y+16),\n (eBox.x-(16/math.sqrt(2)),eBox.y+(16/math.sqrt(2))),\n (eBox.x-16,eBox.y),\n (eBox.x-(16/math.sqrt(2)),eBox.y-(16/math.sqrt(2)))]\n\n minimum = 10000\n index = 0\n for i in possibleShots:\n d = math.sqrt(((i[0]-botRect.x) ** 2) + (((i[1]-botRect.y) ** 2)))\n if d < minimum:\n minimum = d\n index = possibleShots.index(i)\n\n\n\n randNum = random.randint(1,20)\n if randNum == 1:\n fireballListX.append(eBox.x)\n fireballListY.append(eBox.y)\n fireballSlopeX.append(possibleShots[index][0]-eBox.x)\n fireballSlopeY.append(possibleShots[index][1]-eBox.y)\n\n if(e1Life != \"dead\"):\n shots(eBox1)\n if(e2Life != \"dead\"):\n shots(eBox2)\n if(e3Life != \"dead\"):\n shots(eBox3)\n if(e4Life != \"dead\"):\n shots(eBox4)\n if(e5Life != \"dead\"):\n shots(eBox5)\n\n\n def moveEnemy(eBox):\n x_speed = 10\n y_speed = 10\n x_speed *= random.choice([-1,1])\n y_speed *= random.choice([-1,1])\n randNum = random.randint(1,4)\n if randNum == 4:\n eBox.x += x_speed\n eBox.y += y_speed\n\n moveEnemy(eBox1)\n moveEnemy(eBox2)\n moveEnemy(eBox3)\n moveEnemy(eBox4)\n moveEnemy(eBox5)\n\n if eBox1.right >= 512:\n eBox1.right = 512\n elif eBox1.left <= 0:\n eBox1.left = 0\n\n if eBox1.top <= 0:\n eBox1.top = 0\n elif eBox1.bottom >= 512:\n eBox1.bottom = 512\n\n if eBox2.right >= 512:\n eBox2.right = 512\n elif eBox2.left <= 0:\n eBox2.left = 0\n\n if eBox2.top <= 0:\n eBox2.top = 0\n elif eBox2.bottom >= 512:\n eBox2.bottom = 512\n\n if eBox3.right >= 512:\n eBox3.right = 512\n elif eBox3.left <= 0:\n eBox3.left = 0\n\n if eBox3.top <= 0:\n eBox3.top = 0\n elif eBox3.bottom >= 512:\n eBox3.bottom = 512\n\n if eBox4.right >= 512:\n eBox4.right = 512\n elif eBox4.left <= 0:\n eBox4.left = 0\n\n if eBox4.top <= 0:\n eBox4.top = 0\n elif eBox4.bottom >= 512:\n eBox4.bottom = 512\n\n if eBox5.right >= 
512:\n eBox5.right = 512\n elif eBox5.left <= 0:\n eBox5.left = 0\n\n if eBox5.top <= 0:\n eBox5.top = 0\n elif eBox5.bottom >= 512:\n eBox5.bottom = 512\n\n if e1Life == \"alive\":\n if botRect.x - eBox1.x <= 0:\n heliAnimLeft.blit(windowSurface,(eBox1.x,eBox1.y))\n elif botRect.x - eBox1.x >= 0:\n heliAnimRight.blit(windowSurface,(eBox1.x,eBox1.y))\n\n if e2Life == \"alive\":\n if botRect.x - eBox2.x <= 0:\n heliAnimLeft.blit(windowSurface,(eBox2.x,eBox2.y))\n elif botRect.x - eBox2.x >= 0:\n heliAnimRight.blit(windowSurface,(eBox2.x,eBox2.y))\n\n if e3Life == \"alive\":\n if botRect.x - eBox3.x <= 0:\n heliAnimLeft.blit(windowSurface,(eBox3.x,eBox3.y))\n elif botRect.x - eBox3.x >= 0:\n heliAnimRight.blit(windowSurface,(eBox3.x,eBox3.y))\n\n if e4Life == \"alive\":\n if botRect.x - eBox4.x <= 0:\n heliAnimLeft.blit(windowSurface,(eBox4.x,eBox4.y))\n elif botRect.x - eBox4.x >= 0:\n heliAnimRight.blit(windowSurface,(eBox4.x,eBox4.y))\n\n if e5Life == \"alive\":\n if botRect.x - eBox5.x <= 0:\n heliAnimLeft.blit(windowSurface,(eBox5.x,eBox5.y))\n elif botRect.x - eBox5.x >= 0:\n heliAnimRight.blit(windowSurface,(eBox5.x,eBox5.y))\n\n \n\n if tempHitBox.colliderect(eBox1):\n e1Life = \"dead\"\n total_score += 100\n\n\n\n if tempHitBox.colliderect(eBox2):\n e2Life = \"dead\"\n total_score += 100\n \n\n\n if tempHitBox.colliderect(eBox3):\n e3Life = \"dead\"\n total_score += 100\n \n\n if tempHitBox.colliderect(eBox4):\n e4Life = \"dead\"\n total_score += 100\n \n\n if tempHitBox.colliderect(eBox5):\n e5Life = \"dead\"\n total_score += 100\n \n\n if tempHitBox.colliderect(bBox):\n boss_health -= 1\n \n \n\n\n# enemyHealth = []\n\n # enemyHealth.append(eBox2)\n\n if e1Life != \"alive\" and e2Life != \"alive\" and e3Life != \"alive\" and e4Life != \"alive\" and e5Life != \"alive\":\n\n\n windowSurface.blit(boss,(192,192))\n boss_health_bar(boss_health)\n randNum = random.randint(1,30)\n happened = False\n if boss_health <= 0 and happened == False:\n e1Life = \"alive\"\n e2Life = \"alive\"\n e3Life = \"alive\"\n e4Life = \"alive\"\n e5Life = \"alive\"\n current_health += 40\n boss_health = 20\n happened = True\n bossLabel = pygame.font.SysFont('Comic Sans MS',20)\n bossText = bossLabel.render(\"SPIDER BOSS: \" + str(boss_health) + \"HP\", False, (255,255,255))\n windowSurface.blit(bossText,[160,120])\n if randNum == 1:\n fireballPos = [[248,248]] * 8\n for x in range(8):\n xcoord = random.randint(-16,16)\n ycoord = random.randint(-16,16)\n fireballListX.append(xcoord+248)\n fireballListY.append(ycoord+248)\n fireballSlopeX.append(xcoord)\n fireballSlopeY.append(ycoord)\n\n \n fireballRects = []\n if len(fireballListX) > 0:\n doneFireballs = []\n for i in range(len(fireballListX)):\n fireballRects.append(pygame.Rect(fireballListX[i],fireballListY[i],16,16))\n windowSurface.blit(fireball,(fireballRects[i].x,fireballRects[i].y))\n fireballListX[i] = fireballListX[i]+fireballSlopeX[i]\n fireballListY[i] = fireballListY[i]+fireballSlopeY[i]\n if (fireballListX[i] < 0 or fireballListX[i] > 512) or (fireballListY[i] < 0 or fireballListY[i] > 512):\n doneFireballs.append(i)\n if fireballRects[i].colliderect(botRect):\n \n current_health -= damage\n for i in doneFireballs: \n fireballListX.remove(fireballListX[len(doneFireballs)-1-i])\n fireballListY.remove(fireballListY[len(doneFireballs)-1-i])\n fireballSlopeX.remove(fireballSlopeX[len(doneFireballs)-1-i])\n fireballSlopeY.remove(fireballSlopeY[len(doneFireballs)-1-i])\n fireballRects.remove(fireballRects[len(doneFireballs)-1-i])\n\n 
draw_health_bar(current_health)\n\n\n \n\n pygame.display.flip()\n pygame.display.update()\n\n\n if current_health <= 0:\n kText = pygame.font.SysFont('Comic Sans MS', 40)\n failText = kText.render(\"GAME OVER\" , False, (255, 0 , 0))\n windowSurface.fill(BLACK)\n windowSurface.blit(failText,[140,170])\n finalScore = pygame.font.SysFont('Comic Sans MS', 40)\n scoreFinal = finalScore.render(\"Final Score: \" + str(total_score), False, (255,255,255))\n windowSurface.blit(scoreFinal,[140,210])\n windowSurface.blit(againSurface,againRect)\n pygame.display.update()\n\n mainClock.tick(30)\n" } ]
1
topistalamaro/Variance
https://github.com/topistalamaro/Variance
e5c716299c5b2d5a31fc948f9a444fce89e2f612
320b50244d6a8a8ae2239f96fff379eb2225633e
be31a9639f4e304ca115faac8109bf0a9e61bba2
refs/heads/main
2023-07-04T14:04:59.219795
2021-08-12T17:39:47
2021-08-12T17:39:47
393,435,294
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5481481552124023, "alphanum_fraction": 0.5629629492759705, "avg_line_length": 9.916666984558105, "blob_id": "fea16c2fe8cec58e1217fe19ce308e0ffa858185", "content_id": "0d1ec9904b7345df79502c44f96fc4e9c38f1bd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 135, "license_type": "no_license", "max_line_length": 54, "num_lines": 12, "path": "/Variance/weather_data.py", "repo_name": "topistalamaro/Variance", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pickle\n\nlondon_data = pickle.load( open( \"weather.p\", \"rb\" ) )\n\n\n# In[ ]:\n\n\n\n\n" } ]
1
dylanwach/guitars_test_real
https://github.com/dylanwach/guitars_test_real
7e8703ad3e720186348363fc458d5403906ee204
94dfed1300c13e688ecb04b0ae933aa55a428c39
62ebaef7ae1781be12bb4c7eb74f28eba344dcc0
refs/heads/master
2023-04-27T00:27:44.759722
2021-06-08T01:19:22
2021-06-08T01:19:22
374,842,650
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6341463327407837, "alphanum_fraction": 0.6341463327407837, "avg_line_length": 20, "blob_id": "cb4e200f997ba4efd9eef44457891d1ecd124039", "content_id": "f9911f306b3585ea13defdfe8379f4b45c17278a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 41, "license_type": "no_license", "max_line_length": 23, "num_lines": 2, "path": "/config.py", "repo_name": "dylanwach/guitars_test_real", "src_encoding": "UTF-8", "text": "EMAIL = '[email protected]'\nPASSWORD = 'swimfast4Mom'" }, { "alpha_fraction": 0.5710041522979736, "alphanum_fraction": 0.578134298324585, "avg_line_length": 32, "blob_id": "db63f1298d8b00853f759e9496da8d172cae9ea5", "content_id": "d11dbc32fc9a883bf7cbeec05b363f9320b4fa78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3366, "license_type": "no_license", "max_line_length": 125, "num_lines": 102, "path": "/scrape2.py", "repo_name": "dylanwach/guitars_test_real", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport time\nimport re\nimport urllib.request\nimport urllib.parse\n\n\ndef get_links():\n req = urllib.request.Request(\"https://reverb.com/marketplace?product_type=electric-guitars&condition=used\")\n soup = BeautifulSoup(str(req), 'lxml')\n print(type(soup))\n print(soup)\n resp = urllib.request.urlopen(req)\n respData = resp.read()\n print(respData)\n all_links = re.findall(r'com/p/(.*?)&', str(respData))\n print(all_links)\n return all_links\n\n\ndef get_prices_model(link):\n req = urllib.request.Request(link)\n resp = urllib.request.urlopen(req)\n respData = resp.read()\n prices = re.findall(r'span class=\"price-display\">(.*?)</span', str(respData))\n print('Prices of same \"model: \" ' + str(prices))\n int_prices = []\n if len(prices) != 0:\n for i in prices:\n x = i.replace('$', '')\n x = x.replace(',', '')\n int_prices.append(float(x))\n a = sum(int_prices) / len(int_prices)\n print('Average \"model\": ' + str(a))\n\n\ndef get_prices_name(link):\n req = urllib.request.Request(link)\n resp = urllib.request.urlopen(req)\n respData = resp.read()\n prices = re.findall(r'span class=\"price-display\">(.*?)</span', str(respData))\n int_prices = []\n if len(prices) != 0:\n for i in prices:\n x = i.replace('$', '')\n x = x.replace(',', '')\n int_prices.append(float(x))\n a = sum(int_prices) / len(int_prices)\n print('Average name: ' + str(a))\n\n\ndef into_searchable(title1):\n title = title1.split(' ')\n link = 'https://reverb.com/marketplace?query='\n title = '%20'.join(title)\n link = link+title+'&show_only_sold=true'\n print(link)\n return link\n\n\ndef get_guitar(link):\n req = urllib.request.Request(link)\n resp = urllib.request.urlopen(req)\n respData = resp.read()\n title1 = re.findall(r'&quot;title&quot;:&quot;(.*?)&quot', str(respData))\n title2 = re.findall(r'</a></li></ul><h1>(.*?)<', str(respData))\n title = ''\n if len(title1) == 1:\n title = title1[0]\n else:\n title = title2[0]\n #print(soup)\n model = re.findall(r'Model</td><td data-spec-groups=\"true\"><ul class=\"collapsing-list collapsing-list--collapsed\">'\n r'<li class=\"collapsing-list__item\"><a href=\"(.*?)</a', str(respData))[0].split('\">')\n get_prices_model(model[0])\n print(title)\n link_search = into_searchable(title)\n get_prices_name(link_search)\n try:\n brand = re.findall(r'Brand</td><td data-spec-groups=\"true\"><ul class=\"collapsing-list collapsing-list--collapsed\">'\n r'<li 
class=\"collapsing-list__item\"><a class=\"\" href=\"(.*?)</a', str(respData))[0].split('\">')[1]\n print(brand)\n brands = ['SGN']\n if brand in brands:\n print('Not a guitar')\n except:\n print('no linked brand')\n try:\n finish = re.findall(r'Finish</td><td data-spec-groups=\"true\"><ul class=\"collapsing-list collapsing-list--collapsed\">'\n r'<li class=\"collapsing-list__item\"><a class=\"\" href=\"(.*?)</a', str(respData))[0].split('\">')[1]\n print(finish)\n except:\n print('no finish')\n\n\nif __name__ == '__main__':\n links = get_links()\n quit()\n for i in links[4:5]:\n get_guitar(i)\n print('\\n')\n" }, { "alpha_fraction": 0.7482014298439026, "alphanum_fraction": 0.7625899314880371, "avg_line_length": 14.55555534362793, "blob_id": "2bb16f570b4c84e27d5b6bbe76aea580e08a5f97", "content_id": "4157216ac4623c2bf8f738771f2835e05c39a77c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 139, "license_type": "no_license", "max_line_length": 48, "num_lines": 9, "path": "/automate.py", "repo_name": "dylanwach/guitars_test_real", "src_encoding": "UTF-8", "text": "import craig\nimport schedule\n\n\ncraig.check_craig()\nschedule.every(10).minutes.do(craig.check_craig)\n\nwhile True:\n schedule.run_pending()" } ]
3
ahsankhan530/Dash-Python
https://github.com/ahsankhan530/Dash-Python
f43a59206b112d6972b58c49a413ef73828479ac
a83be05013e6dfeec7e95adfa0d6c1723f474ca3
a04f75f7bb97c22f9209734c87d4671afd34cc7e
refs/heads/master
2020-05-03T11:43:07.741433
2019-04-09T13:54:04
2019-04-09T13:54:04
178,607,380
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6553524732589722, "alphanum_fraction": 0.6649260520935059, "avg_line_length": 31.380281448364258, "blob_id": "40c1e837edf14861199d9a81986a46fd3903f46c", "content_id": "22c5da17f18204339936f0d60989131bc36ca1b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2298, "license_type": "no_license", "max_line_length": 134, "num_lines": 71, "path": "/app.py", "repo_name": "ahsankhan530/Dash-Python", "src_encoding": "UTF-8", "text": "from matplotlib import pyplot as plt\nimport dash\nfrom flask import Flask,request,render_template\nfrom flask_sqlalchemy import SQLAlchemy\nfrom oauth2client.service_account import ServiceAccountCredentials\nimport gspread\nimport json\nfrom flask_sqlalchemy import SQLAlchemy\nimport pandas as pd\n\napp=Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:test123@localhost'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SECRET_KEY'] = b'dE\\xad2g\\x0c\\x8d\\xb9\\x1cq\\x86\\x04:\\xa8>\\xc7\\xc5\\xc2Dr\\xe7f\\xf9\\xeb'\n\ndb = SQLAlchemy(app)\n\nclass dreamer(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n Ref = db.Column(db.String(200))\n Month = db.Column(db.String)\n Date = db.Column(db.String)\n Amount = db.Column(db.String)\n category = db.Column(db.String)\n subcategory = db.Column(db.String)\n Detailed_Category = db.Column(db.String)\n Ad= db.Column(db.String)\n Paid= db.Column(db.String)\n Settled = db.Column(db.String)\n FIELD2 = db.Column(db.String)\[email protected]('/ss',methods=['POST'])\ndef index():\n data=request.get_json()\n# scope=['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']\n# credationals=ServiceAccountCredentials.from_json_keyfile_name('placeapi-a3c5e000d8e2.json',scope)\n# gc=gspread.authorize(credationals)\n# wks=gc.open('ahsan').sheet1\n# g=wks.get_all_values()\n \n a=dreamer(Ref=data[\"Ref\"],Month=data['Month'],Date=data[\"Date\"],Amount=data[\"Amount\"],Detailed_Category=data['Detailed_Category'],\n subcategory=data['subcategory'], Ad=data['Ad'],Paid=data['Paid'],Settled=data['Settled'],FIELD2=data['FIELD2'])\n db.session.add(a)\n db.session.commit()\n return 'ok'\[email protected]('/',methods=['POST','GET'])\ndef add():\n i= dreamer.query.filter_by(id=1).first()\n db.session.commit()\n df=pd.Series([i.Ref,i.Month,i.Date,i.Amount,i.category,i.subcategory,i.Detailed_Category,i.Ad,i.Paid,i.Settled,i.FIELD2])\n x=df[0]\n y=df[1]\n plt.plot(data=df)\n \n plt.show()\n print(x,y)\n \n return 'ok'\n# for i,v in g:\n# a=dd(text=i,complete=v)\n# db.session.add(a)\n# db.session.commit()\n\n \n # wks.append_row([a.text])\n # db.session.add(a)\n # db.session.commit()\n \n\nif __name__ == \"__main__\":\n db.create_all()\n app.run(debug=True)" }, { "alpha_fraction": 0.6144238710403442, "alphanum_fraction": 0.6232876777648926, "avg_line_length": 23.33333396911621, "blob_id": "6f68af37834d643230155737f5b72c943606bc8f", "content_id": "f47eca75180eb5878b34e370e19a4a735ca1c4a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2482, "license_type": "no_license", "max_line_length": 134, "num_lines": 102, "path": "/index.py", "repo_name": "ahsankhan530/Dash-Python", "src_encoding": "UTF-8", "text": "import dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\nimport dash\nimport csv\n\n\nfrom flask import Flask,redirect,url_for,request\nimport flask\nfrom werkzeug.serving import run_simple\nimport 
dash_html_components as html\nfrom dash import Dash\nfrom werkzeug.wsgi import DispatcherMiddleware\nfrom flask_sqlalchemy import SQLAlchemy\nimport pandas as pd\n\n\napp = flask.Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:test123@localhost'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SECRET_KEY'] = b'dE\\xad2g\\x0c\\x8d\\xb9\\x1cq\\x86\\x04:\\xa8>\\xc7\\xc5\\xc2Dr\\xe7f\\xf9\\xeb'\n\n\n\ndb = SQLAlchemy(app)\n\nclass dreamer(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n Ref = db.Column(db.String(200))\n Month = db.Column(db.String)\n Date = db.Column(db.String)\n Amount = db.Column(db.String)\n category = db.Column(db.String)\n subcategory = db.Column(db.String)\n Detailed_Category = db.Column(db.String)\n Ad= db.Column(db.String)\n Paid= db.Column(db.String)\n Settled = db.Column(db.String)\n FIELD2 = db.Column(db.String)\n\[email protected]('/e',methods=['POST'])\ndef index():\n data=request.get_json()\n a=dreamer(Ref=data[\"Ref\"],Month=data['Month'],Date=data[\"Date\"],Amount=data[\"Amount\"],Detailed_Category=data['Detailed_Category'],\n subcategory=data['subcategory'], Ad=data['Ad'],Paid=data['Paid'],Settled=data['Settled'],FIELD2=data['FIELD2'])\n db.session.add(a)\n db.session.commit()\n \n return 'ok'\[email protected]('/reports',methods=['POST','GET'])\ndef add():\n \n global a\n add.a= dreamer.query.filter_by().all()\n db.session.commit() \n # for i in a:\n # global df\n # add.df=pd.Series([i.Amount,i.Month])\n \n return 'df'\nadd()\n# print(add.df)\n# df=add.df\ndash_app2 = Dash(__name__, server = app)\n\n# print(add.a)\nli=[]\nai=[]\nfor i in add.a:\n # df=pd.Series([i.Amount,i.Month])\n # a=df\n li.append(i.Month)\n ai.append(i.Amount)\nap={'a':li,'pi':ai}\ndf=pd.DataFrame(ap)\ny=df['a']\nx=df['pi']\n\n\n\ndash_app2.layout= html.Div(children=[\n html.H1(children='Data '),\n dcc.Graph(\n id='example',animate=True,\n figure={\n \n 'data': [\n \n {'x':y,'y':x,'type':'bar'}\n ],\n 'layout': {\n 'title': 'Month and Amount'\n }\n }\n )\n]) \n\napp = DispatcherMiddleware(app, {\n '/dash2': dash_app2.server\n})\n\nrun_simple('0.0.0.0', 8080, app, use_reloader=True, use_debugger=True)\n" } ]
2
newbiejasper/newbie_programming
https://github.com/newbiejasper/newbie_programming
88c4a432f878b518c3c705d7fc2d09735f0ea8b2
d6cd9d622f7df10603b3cd8d6ea177fbe9914947
b2aa6a2bf658fc5890900ba64ce9cb237a279bbc
refs/heads/master
2021-01-22T19:09:14.961720
2019-03-30T03:36:43
2019-03-30T03:36:43
85,171,155
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7335456609725952, "alphanum_fraction": 0.7441613674163818, "avg_line_length": 43.80952453613281, "blob_id": "813251ca7cd41aef6d533ff58ff6bc31a4121d4c", "content_id": "19a4aec547a0faba3e2a7a8ad79df4a646bccf57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 956, "license_type": "no_license", "max_line_length": 152, "num_lines": 21, "path": "/python/ml/chapter2/prediction.py", "repo_name": "newbiejasper/newbie_programming", "src_encoding": "UTF-8", "text": "'''\nprediction:预测类别\n输入: \n'''\n\nimport numpy as np\nimport txtfile2matrix\nimport normalization\nimport kNN\n\ndef classifyPerson():\n resultList = ['not at all', 'in small doses', 'in large doses']\n percentTats = float(input(\"percentage of time spent playing video games?\"))\n ffMiles = float(input(\"frequent flier miles earned per year?\"))\n iceCream = float(input(\"liters of ice cream consumed per year?\"))\n datingDataMat,classLabelVector = txtfile2matrix.file2matrix(\"C:/Users/jasper/iCloudDrive/newbie_programming/python/ml/chapter2/datingTestSet.txt\",3)\n datingLabels = txtfile2matrix.char2int(classLabelVector)\n normMat, ranges, minVals = normalization.autoNorm(datingDataMat)\n inArr = np.array([ffMiles, percentTats, iceCream,],dtype=float)\n classifierResult = kNN.classify0((inArr - minVals)/ranges, normMat, datingLabels, 3)\n print(\"You will probably like this person: %s\" % (resultList[classifierResult - 1]))\n\n" }, { "alpha_fraction": 0.5787999033927917, "alphanum_fraction": 0.5953770875930786, "avg_line_length": 27.458471298217773, "blob_id": "34deca51f53246f14a67e4264208ccc0e5df894b", "content_id": "9996611747b58250b576addcf780f7d88bb2ae9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 13364, "license_type": "no_license", "max_line_length": 226, "num_lines": 301, "path": "/R/读取数据/R读取文件(1).md", "repo_name": "newbiejasper/newbie_programming", "src_encoding": "UTF-8", "text": "R语言读取文件(一)\n================\nnewbiejasper\n2017/3/25\n\n1.读取excel文件\n---------------\n\n读取excel文件文件其实有很多的包可以做到,最常用的就是xlsx和readxl, 首先,安装并载入读取excel文件所用的包:\n\n``` r\ninstall.packages(\"xlsx\")\nlibrary(xlsx)\n```\n\n或者,\n\n``` r\ninstall.packages(\"readxl\")\nlibrary(readxl)\n```\n\n### 两个包的基本介绍\n\n- xlsx是用R把excel文件中的工作表以data.frame的格式读入R语言,他有两个常用函数,其中read.xlsx2通常会比read.xlsx读取的速度更快一些,因为它的好多工作是利用Java完成的,所以速度有所提升,但是在读取子数据集的时候没有read.xlsx稳定性好。\n\nread.xlsx(file, sheetIndex, sheetName=NULL, rowIndex=NULL, startRow=NULL, endRow=NULL, colIndex=NULL, as.data.frame=TRUE, header=TRUE, colClasses=NA, keepFormulas=FALSE, encoding=\"unknown\", ...)\n\nread.xlsx2(file, sheetIndex, sheetName=NULL, startRow=1, colIndex=NULL, endRow=NULL, as.data.frame=TRUE, header=TRUE, colClasses=\"character\", ...)\n\n注:\n1. sheetIndex:工作表单号\n2. rowIndex:行号,就是你想读取那些行\n3. header:表头,就是有没有列名。比如姓名,年龄,学号等\n4. startRow:比如你想从第五行开始读取,就设置startRow = 5\n\n- readxl包可以用来读取xls和xlsx格式的文件\n\nread\\_excel(path, sheet = 1, col\\_names = TRUE, col\\_types = NULL, na = \"\", skip = 0)\n\n注:\n1. sheet:读取的工作表\n2. col\\_names: 如果是FALSE的话,就表示第一行不是列名,R会自动给你取成x1,x2...\n3. colt\\_types: blank,numeric,date,text\n4. 
na: 缺失值,默认空着的单元是缺失值,你也可以自己指定,比如认为999是缺失值\n\n2.读取XML文件\n-------------\n\n### XML文件简介\n\n在计算机领域,XML(extensible markup language)指的是可扩展标记语言,类似于HTML,它设计的宗旨是传输数据,而不是显示数据,所以这也是它和HTML的一个明显的差别。另外一个差别是XML的标签没有被预定义,我们可以根据自己的需要自行设计标签名字,所以具有自我描述性。\n\n### 一个具体的例子\n\n``` html\n<?xml version=\"1.0\" encoding=\"UTF-8\"?> \n<message> \n<to>房,姜\\</to> \n<from>高\\</from> \n<heading>午餐\\</heading> \n<content time=\"noon\">中午怎么吃?点外卖?小白房?还是食堂?\\</content> \n</message> \n```\n\n以上就是一个XML的例子,它拥有发送者和接受者,标题,内容等信息,所以自我描述非常清晰。但是这个文档实际上只是包装了一些数据信息,而并没有做任何传输、接收和显示文档的信息,也就说你可以把他当成一个纯文本。纯文本的特点会让你在不同的操作系统进行数据共享的门槛大大降低。\n\n### XML结构\n\n- XML文档是一种树结构,从根部开始扩展到枝叶。第1行是XML声明,它定义版本和编码。第2行是根元素,和第7行相对应,&lt;message&gt;叫做起始标签,&lt;/message&gt;叫做结束标签,结束标签有一个\"/\"符号。\n\n- XML文档必须包含根元素,所有元素都可以拥有子元素。\n\n- XML文档均可拥有文本内容和属性。例如content元素下,属性time为noon,属性值要加引号。\n\n- XML文档元素必须要有结束标签,标签对大小写敏感,而且必须正确嵌套,也就是说一个标签必须完整地嵌套在另一个标签里。\n\n开始读取文件,首先加载XML和RCurl包。\n\n``` r\nlibrary(XML)\nlibrary(RCurl)\n```\n\n ## Loading required package: bitops\n\n``` r\nurl <- \"https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Frestaurants.xml\"\nfile_url <- getURL(url)\ndoc <- xmlTreeParse(file_url,useInternal=TRUE)\nrootNode <- xmlRoot(doc)\n```\n\n注意:这里,url是https传输的,xmlTreeParse不支持,会报出不是一个XMl文档的错误,所以我们需要先用getURL处理一下,然后用xmlTreeParse函数读取,如果是http的话,就可以直接把url传入xmlTreeParse函数。现在你可以把doc想象成一棵大树,也就是XML文档的树结构,xmlRoot就是获取文档节点的函数。\n\n``` r\nxmlName(rootNode)\n```\n\n ## [1] \"response\"\n\n``` r\nnames(rootNode)\n```\n\n ## row \n ## \"row\"\n\n这里,我们通过xmlName函数获取根节点的名称是response,根节点下面包括叫做row的子节点。下面,我们分析一下如下的操作:\n\n``` r\nrootNode[[1]][[1]][[1]]\n```\n\n ## <name>410</name>\n\n我们在选择xml文档的节点时可以采用如上的类似于R语言中列表元素的索引。其中rootNode\\[\\[1\\]\\]我们选择的进入根节点response下面的第一个子节点,就是\"row\"节点,rootNode\\[\\[1\\]\\]\\[\\[1\\]\\]进入的就是row节点下的第一个子节点,也叫做row,rootNode\\[\\[1\\]\\]\\[\\[1\\]\\]\\[\\[1\\]\\],进入的是这个row节点下的第一个子节点就是name节点。所以我们获得了如上的name元素。\n\n### XPATH语言介绍\n\nXPATH是一门在XML文档中查找特定信息的语言,XPATH通过路径表达式,搜寻特定的元素和属性找到节点,这些路径表达式和我们电脑文件系统的路径表达式很相似。\n\n#### XPATH的节点(Node)\n\nXML文档是被作为节点树来对待的。我们通过XPATH语言查找特定的元素、属性和文本。树的根被称为文档节点或者根节点。\n\n请看下面这个 XML 文档:\n\n``` html\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<bookstore>\n<book>\n <title lang=\"zhong\">金融时间序列分析</title>\n <author>房小姜</author> \n <year>2017</year>\n <price>30¥</price>\n <publisher>PKU</publisher>\n</book>\n</bookstore>\n```\n\n上面的XML文档中的节点例子中&lt;bookstore&gt;是根节点,&lt;publisher&gt;PKU&lt;/publisher&gt;是元素节点 lang=\"zhong\"是属性节点, 基本值通常是在起始标签和结束标签之间的部分,像房小严,2017等都是基本值。\n\n- 节点关系\n\n - 每个元素以及属性都有一个父节点,在上面的例子中,book元素是title、author、year、price以及publisher元素的父。title、author、year以及publisher元素都是book元素的子元素。子元素节点可有零个、一个或多个子元素。\n\n - 在上面的例子中,title、author、year、price以及publisher都是同胞,属于同一等级。\n\n - 某节点的父节点、父节点的父节点等叫做先辈。在上面的例子中,title元素的先辈是book元素和bookstore元素。\n - bookstore的后代是book、title、author、year,publisher以及price元素。\n\n#### XPATH语法\n\n下面我们来介绍一下XPath的路径表达式。\n\n##### XML实例文档\n\n``` html\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<bookstore>\n<book>\n <title lang=\"en\">随机过程论</title>\n <price>30</price>\n</book>\n<book>\n <title lang=\"zh\">应用随机分析</title>\n <price>35</price>\n</book>\n</bookstore>\n```\n\n##### 选取节点\n\nXPath使用路径表达式在XML文档中选取节点。\n\n下面列出了一些常见的路径表达式:\n\n| 表达式 | 描述 |\n|----------|--------------------------|\n| nodename | 选取此节点的所有子节点。 |\n| / | 从根节点选取。 |\n| // | 选取所有的指定节点。 |\n| . | 选取当前节点。 |\n| .. 
| 选取当前节点的父节点。 |\n| @ | 选取属性。 |\n\n例如,在下面的表格中,我们已列出了一些路径表达式以及相应的表达式的结果:\n\n| 路径表达式 | 结果 |\n|-----------------|---------------------------------------------|\n| bookstore | 选取bookstore元素的所有子节点。 |\n| /bookstore | 选取根元素 bookstore。 |\n| bookstore/book | 选取属于bookstore的子元素中所有的book元素。 |\n| //book | 选取所有book元素。 |\n| bookstore//book | 选择属于bookstore元素的后代中所有book元素。 |\n| <//@lang> | 选取带有lang属性的所有元素。 |\n\n##### 谓语(Predicates)\n\n谓语用来查找某个特定的节点或者包含某个指定的值的节点。谓语被嵌在方括号中。在下面的表格中,我们列出了带有谓语的一些路径表达式,以及表达式的结果:\n\n<table style=\"width:99%;\">\n<colgroup>\n<col width=\"47%\" />\n<col width=\"51%\" />\n</colgroup>\n<thead>\n<tr class=\"header\">\n<th>路径表达式</th>\n<th>结果</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"odd\">\n<td>/bookstore/book[2]</td>\n<td>选取属于bookstore子元素的第二个book 元素。</td>\n</tr>\n<tr class=\"even\">\n<td>/bookstore/book[last()]</td>\n<td>选取属于bookstore子元素的最后一个book元素。</td>\n</tr>\n<tr class=\"odd\">\n<td>/bookstore/book[last()-1]</td>\n<td>选取属于bookstore子元素的倒数第二个book元素。</td>\n</tr>\n<tr class=\"even\">\n<td>/bookstore/book[position()&lt;3]</td>\n<td>选取最前面的两个属于bookstore元素的子元素的 book 元素。</td>\n</tr>\n<tr class=\"odd\">\n<td>//title<span class=\"citation\">[@lang]</span></td>\n<td>选取所有拥有名为lang的属性的title元素。</td>\n</tr>\n<tr class=\"even\">\n<td>//title<span class=\"citation\">[@lang='eng']</span></td>\n<td>选取所有title元素,且这些元素拥有值为eng的lang属性。</td>\n</tr>\n<tr class=\"odd\">\n<td>/bookstore/book[price&gt;35.00]</td>\n<td>选取bookstore元素的所有book元素,且其中的price 元素的值须大于 35.00。</td>\n</tr>\n<tr class=\"even\">\n<td>/bookstore/book[price&gt;35.00]/title</td>\n<td>选取 bookstore 元素中的 book 元素的所有 title 元素,且其中的price 元素的值须大于 35.00。</td>\n</tr>\n</tbody>\n</table>\n\n##### XPath的轴\n\n轴可定义相对于当前节点的节点集。\n\n| 轴名称 | 结果 |\n|--------------------|----------------------------------------------------------|\n| ancestor | 选取当前节点的所有先辈(父、祖父等)。 |\n| ancestor-or-self | 选取当前节点的所有先辈(父、祖父等)以及当前节点本身。 |\n| attribute | 选取当前节点的所有属性。 |\n| child | 选取当前节点的所有子元素。 |\n| descendant | 选取当前节点的所有后代元素(子、孙等)。 |\n| descendant-or-self | 选取当前节点的所有后代元素(子、孙等)以及当前节点本身。 |\n| following | 选取文档中当前节点的结束标签之后的所有节点。 |\n| namespace | 选取当前节点的所有命名空间节点。 |\n| parent | 选取当前节点的父节点。 |\n| preceding | 选取文档中当前节点的开始标签之前的所有节点。 |\n| preceding-sibling | 选取当前节点之前的所有同级节点。 |\n| self | 选取当前节点。 |\n\n实例如下:\n\n| 例子 | 结果 |\n|------------------------|--------------------------------------------------------------------|\n| child::book | 选取所有属于当前节点的子元素的book节点。 |\n| attribute::lang | 选取当前节点的lang属性。 |\n| child::\\* | 选取当前节点的所有子元素。 |\n| attribute::\\* | 选取当前节点的所有属性。 |\n| child::text() | 选取当前节点的所有文本子节点。 |\n| child::node() | 选取当前节点的所有子节点。 |\n| descendant::book | 选取当前节点的所有 book 后代。 |\n| ancestor::book | 选择当前节点的所有 book 先辈。 |\n| ancestor-or-self::book | 选取当前节点的所有 book 先辈以及当前节点(如果此节点是 book 节点) |\n| child::\\*/child::price | 选取当前节点的所有 price 孙节点。 |\n\n#### 用XPATH语言读取XML和html文档具体实例\n\n``` r\nzipcode <- xpathSApply(rootNode,\"//zipcode\",xmlValue)\nhead(zipcode)\n```\n\n ## [1] \"21206\" \"21231\" \"21224\" \"21211\" \"21223\" \"21218\"\n\n分析:这里采用的是xpathSApply函数,和R语言自带的apply函数很接近,这个语句的功能是遍历这个XML文档的所有节点,找到所有zipcode的节点,然后用xmlValue把值取出来,以向量的形式返回。我们可以通过head验证,发现结果确实是提取出了邮编zipcode。\n\n``` r\nxpathSApply(rootNode,'//row[@_id=\"1\"]',xmlValue)\n```\n\n ## [1] \"41021206Frankford2NORTHEASTERN\"\n\n分析:这条语句是遍历rootNode,寻找属性值\\_id=\"1\"的row元素,并返回个子节点的值,你可以看到他返回的name,zipcode,neighborhood元素都同时返回到一条字符串里了。\n" }, { "alpha_fraction": 0.6955307126045227, "alphanum_fraction": 0.7132216095924377, "avg_line_length": 38.77777862548828, "blob_id": "24345cf6c342c6f4dd85922da532e9b3544738e3", "content_id": 
"31f5bffd07e03ce03c1f1cd50a430b1ccf54b438", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1558, "license_type": "no_license", "max_line_length": 128, "num_lines": 27, "path": "/python/ml/chapter2/kNN.py", "repo_name": "newbiejasper/newbie_programming", "src_encoding": "UTF-8", "text": "'''\nkNN: k近邻算法\n输入: inX: 待分类的向量 (1xN)\n dataSet: M个N维向量组成的矩阵 (M*N)\n labels: 数据集标签 (1xM)\n k: 要比较的邻居个数 (奇数)\n \n输出: inx要分入的类别\n'''\n\nimport numpy as np\nimport operator #运算符模块,k邻近算法采用这个模块进行排序\n\n#k-近邻算法\ndef classify0(inX,dataSet,labels,k): #inX:输入向量;dataSet:训练数据集;labels:标记;k:最近邻居数\n dataSetSize = dataSet.shape[0] #训练数据集行数,样本个数\n diffMat = np.tile(inX,(dataSetSize,1))-dataSet #先沿着第0维重复dataSetSize次,再沿着第一维重复1次,变成和数据集一样的数组;\n sqDiffMat = diffMat**2\n sqDistances = sqDiffMat.sum(axis = 1) #按行求和\n distances = sqDistances**0.5 #距离\n sortedDistIndicies = distances.argsort() #升序排序,返回索引\n classCount = {} # 创建字典,将用于存放key+value\n for i in range(k):\n voteIlabel = labels[sortedDistIndicies[i]]\n classCount[voteIlabel] = classCount.get(voteIlabel,0)+1 #返回字典中key对应的value值,否则返回0;这句执行的结果就是voteIlabel这个key对应类别的个数value\n sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True) #items返回列表,要从大到小排序,按照operate获取的第二维数据\n return sortedClassCount[0][0] #第一个0为次数最多的类别在列表第一个元素,第二个0是key+value中的key,代表类别\n" }, { "alpha_fraction": 0.5821167826652527, "alphanum_fraction": 0.6094890236854553, "avg_line_length": 17.89655113220215, "blob_id": "db6c6be17a3d78e3279b96576b0a949ceecd2c3b", "content_id": "4f8af8b02e2251fd29ca732fdd6f0595396697bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 785, "license_type": "no_license", "max_line_length": 56, "num_lines": 29, "path": "/C++/Accelerated C++/Accelerated C++/main.cpp", "repo_name": "newbiejasper/newbie_programming", "src_encoding": "UTF-8", "text": "// Accelerated C++\n//\n// Created by newbiejasper on 2017/12/25.\n// Copyright © 2017年 newbiejasper. All rights reserved.\n\n//第0章\n\n//注释只能是一行\n/* 这个注释可以使多行,结束*/\n\n#include <iostream> //请求使用所指定的标准库iostream,输入输出流\n#include <string>\n\n//main函数是C++程序运行时,被调用来相应请求的函数\nint main() {\n // 花括号里是主函数语句\n \n //std是标准库iostreamm里的名称空间\n //std::cout指标准输出流,实现程序输出\n //std::endl输出语句行的结束\n std::cout << \"请输入你的名字:\";\n \n std::string name;\n std::cin >> name;\n \n std::cout << \"hello,\" << name << \"!\" << std::endl;\n //返回值为0,代表运行成功\n return 0;\n}\n" }, { "alpha_fraction": 0.6186440587043762, "alphanum_fraction": 0.6627118587493896, "avg_line_length": 15.800000190734863, "blob_id": "06e3c4c69892e7ba05289568b3c60971336c6e66", "content_id": "0b044dc01501db50ea5483cd78349163420bf4f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 994, "license_type": "no_license", "max_line_length": 46, "num_lines": 35, "path": "/python/os模块常用命令.md", "repo_name": "newbiejasper/newbie_programming", "src_encoding": "UTF-8", "text": "# python os模块的常用命令\n\n1. os.name( )——判断操作系统\n\n2. os.getcwd( )——当前工作目录\n\n3. os.listdir( )——列出指定目录下所有的文件和目录名。\n\n4. os.remove( )—-删除指定文件\n\n5. os.rmdir( )——删除指定目录\n\n6. os.mkdir( )——创建目录;os.makedirs()以回归方式建立多层级目录\n\n7. os.path.isfile( )——判断指定对象是否为文件\n\n8. os.path.isdir( )——判断指定对象是否为目录 \n\n9. os.path.exists( )——检验指定的对象是否存在。\n\n10. os.path.split( )——返回路径的目录和文件名。\n\n11. os.system( )——执行终端命令\n\n13. os.chdir( )——改变目录到指定目录\n\n14. os.path.getsize( )——获得文件的大小,如果为目录,返回0\n\n15. os.path.abspath( )——获得绝对路径。例\n\n16. 
os.path.join(path, name)——连接目录和文件名\n\n17.os.path.basename(path)——返回文件名\n\n18. os.path.dirname(path)——返回文件路径\n\n\n" }, { "alpha_fraction": 0.7329699993133545, "alphanum_fraction": 0.7411444187164307, "avg_line_length": 18.210525512695312, "blob_id": "76fcdae95b436ca6026cd9ebe6d1dbc8d776cc63", "content_id": "ae89aaa2335ebaac6fe00d4fa027939c2a63fa93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 601, "license_type": "no_license", "max_line_length": 76, "num_lines": 19, "path": "/R/读取数据/读取文本文件.md", "repo_name": "newbiejasper/newbie_programming", "src_encoding": "UTF-8", "text": "# 读取txt文件\n\n## R语言读取txt文件\n\n示例文件test.txt\n\n![test.ong](https://github.com/newbiejasper/images/blob/master/R语言/test.png)\n\n这是用空格作为分隔符的txt文本文件\n\n```R\na = read.table('test.txt',sep=' ',header = TRUE)\n```\n1. 文件如果在工作目录下,则直接写文件名称即可,否则输入的应当是文件的绝对路径。\n2. sep参数设置的分隔符\n3. header设置的是有无表头\n\n> 注意如果出现下列警告:incomplete final line found by readTableHeader on......\n> 只需要在文本文件的最后一行按下回车键即可。\n\n\n" }, { "alpha_fraction": 0.7423076629638672, "alphanum_fraction": 0.7423076629638672, "avg_line_length": 64, "blob_id": "1589f5acf7254ce8b12c1e2a0c679b220f05692e", "content_id": "d923fbd3b1bbc7a79d23e1e854891dc864723b52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 260, "license_type": "no_license", "max_line_length": 219, "num_lines": 4, "path": "/readme.txt", "repo_name": "newbiejasper/newbie_programming", "src_encoding": "UTF-8", "text": "Hello, everyone.\n I am jasper. I am just a newbie in the world of programming. However, as a crazy lover of coding, I have been on the way. In order for my convenience and communication with others, I create my repository in github.\n\nMy story begins......\n" }, { "alpha_fraction": 0.7631579041481018, "alphanum_fraction": 0.8117408752441406, "avg_line_length": 40.25, "blob_id": "efa632b2b6a74ba0afd07a63268b22b366014dd6", "content_id": "35f629109142e4b8dbbe76a25cc5a544740f6c45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 542, "license_type": "no_license", "max_line_length": 148, "num_lines": 12, "path": "/python/ml/chapter2/descriptiveAnalysis.py", "repo_name": "newbiejasper/newbie_programming", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport txtfile2matrix\n\ndatingDataMat,classLabelVector = txtfile2matrix.file2matrix(\"C:/Users/jasper/iCloudDrive/newbie_programming/python/ml/chapter2/datingTestSet.txt\",3)\ndatingLabels = txtfile2matrix.char2int(classLabelVector)\n\nfig = plt.figure() #创建一个新图形\nax = fig.add_subplot(111) #例如349,则把画布分成3行4列,在第9块画图\nax.scatter(datingDataMat[:,1],datingDataMat[:,2],15.0*np.array(datingLabels),15.0*np.array(datingLabels))\nplt.show()" }, { "alpha_fraction": 0.7033898234367371, "alphanum_fraction": 0.7139830589294434, "avg_line_length": 26.823530197143555, "blob_id": "b1bcdc08e0b7ca9c431bdb443ec5787ccdc44929", "content_id": "21f4b009ed329f8d65f870260b23c6a186d11639", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 606, "license_type": "no_license", "max_line_length": 72, "num_lines": 17, "path": "/python/ml/chapter2/normalization.py", "repo_name": "newbiejasper/newbie_programming", "src_encoding": "UTF-8", "text": "'''\nnormalization:数值归一化\n输入:dataSet\n输出:normDataSet(归一化之后的数据)\n'''\n\nimport numpy as np\n\ndef autoNorm(dataSet):\n minVals = dataSet.min(0) #列的最小值\n 
maxVals = dataSet.max(0) #列的最大值\n ranges = maxVals-minVals\n normDataSet = np.zeros(np.shape(dataSet)) #创建和原数据集尺寸相当的数组\n m = dataSet.shape[0] #dataSet的行数\n normDataSet = dataSet-np.tile(minVals,(m,1)) #把最小值的数组沿着行方向重复m次,列方向一次\n normDataSet = normDataSet/np.tile(ranges,(m,1)) \n return(normDataSet,ranges,minVals)" }, { "alpha_fraction": 0.4060150384902954, "alphanum_fraction": 0.4736842215061188, "avg_line_length": 7.090909004211426, "blob_id": "dd7d1ff5649f7c2e84c60a0220d508670c3de0c1", "content_id": "1c67363807deee81e3e37db709dc105c79c8b007", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 400, "license_type": "no_license", "max_line_length": 29, "num_lines": 33, "path": "/study/markdown.md", "repo_name": "newbiejasper/newbie_programming", "src_encoding": "UTF-8", "text": "# Markdown Learning Materials\n\n## 语法\n\n### 段落\n\n段落的前后必须是空行 \n\n### 标题\n\n用====和----触发setext形式的标题\n\n### 引用\n\n> 此处的大于符号表示引用\n> 直接回车多行引用\n> > 嵌套引用\n> \n>\n> shift+enter 实现引用内换行\n\n### 列表\n\n1. 第一层\n * 1-1\n + 1-2\n * 1-2-1\n * 1-2-2\n2. 第二层\n 1. 2-1\n 2. 2-2\n\n### 代码" }, { "alpha_fraction": 0.6423357725143433, "alphanum_fraction": 0.6523722410202026, "avg_line_length": 30.941177368164062, "blob_id": "3a8225b16e7dce7e98c2393754779585fce7dd5c", "content_id": "ce200b341a5fc1a919aa5eb4913abf7d37493712", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1378, "license_type": "no_license", "max_line_length": 68, "num_lines": 34, "path": "/python/ml/chapter2/txtfile2matrix.py", "repo_name": "newbiejasper/newbie_programming", "src_encoding": "UTF-8", "text": "'''\nfile2matrix:将txt文件,以dataframe样子出现的的数据转化为矩阵和标签向量\n输入:filename文件名,m是特征数\n输出:returnMat训练样本矩阵,classLabelVector类标签向量\n\n'''\nimport numpy as np\n\ndef file2matrix(filename,m):\n fr = open(filename)\n arrayOLines = fr.readlines() #逐行读取,以列表的形式返回\n numberOfLines = len(arrayOLines) #文件行数\n returnMat = np.zeros((numberOfLines,m)) #用于存放样本数据的矩阵\n classLabelVector = [] #用于存放标签向量\n index = 0\n for line in arrayOLines:\n line = line.strip() #截取掉所有的回车字符\n listFromLine = line.split('\\t') #按照'\\t'分割,返回列表,但是都是字符串\n returnMat[index,:] = listFromLine[0:m] #取前三列\n classLabelVector.append(listFromLine[-1]) #-1表示每行最后一个数,是标签数据\n index += 1\n return(returnMat,classLabelVector)\n\n###把classLabelVector的字符串标签改为数字\ndef char2int(classLabelVector):\n for i in range(len(classLabelVector)):\n if classLabelVector[i] == \"largeDoses\":\n classLabelVector[i] = 3\n else:\n if classLabelVector[i] == \"smallDoses\":\n classLabelVector[i] = 2\n else:\n classLabelVector[i] = 1\n return(classLabelVector)\n \n\n" }, { "alpha_fraction": 0.2687895596027374, "alphanum_fraction": 0.5357990264892578, "avg_line_length": 20.514894485473633, "blob_id": "bf4520eaaccf66cce3317b85d6be0e9a2efe39c1", "content_id": "0024956ffad57fad46f8b2bf4655d14b5e26af10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6366, "license_type": "no_license", "max_line_length": 251, "num_lines": 235, "path": "/R/读取数据/data_table_package.md", "repo_name": "newbiejasper/newbie_programming", "src_encoding": "UTF-8", "text": 
"data.table包\n================\nnewbiejasper\n2017/3/28\n\ndata.table包是干什么的\n======================\n\nR语言里有data.frame的数据结构。data.table是data.frame的一个扩展,它几乎继承了所有的data.frame特性,我们通常对data.frame作出的操作,对于data.table同样也是有效的。那么data.table存在的价值是什么呢?它是由C语言写的,所以运行速度快,数据存储效率高,像取子集,变量分组,更新变量等操作都要优于data.frame,而且对于大型数据集的支持比较好。所以,如果你还没有学习过data.frame,可以直接越过它直接学习data.table。\n\n实例中学习\n==========\n\n创建数据集\n----------\n\n创建data.frame\n\n``` r\ndf <- data.frame(x=rnorm(9),y=rep(c('a','b','c'),each=3),z=rnorm(9))\nhead(df,n=3)\n```\n\n ## x y z\n ## 1 -2.3073716 a 1.5705973\n ## 2 0.5453968 a -1.0604976\n ## 3 0.3161931 a -0.6694775\n\n创建data.table\n\n``` r\nlibrary(data.table)\ndt <- data.table(x=rnorm(9),y=rep(c('a','b','c'),each=3),z=rnorm(9))\nhead(dt,n=3)\n```\n\n ## x y z\n ## 1: 0.2215434 a -0.036959138\n ## 2: 0.1305816 a -0.000746413\n ## 3: -0.4680529 a -0.416177115\n\n查看内存下所有的data table的情况\n\n``` r\ntables()\n```\n\n ## NAME NROW NCOL MB COLS KEY\n ## [1,] dt 9 3 1 x,y,z \n ## Total: 1MB\n\n像data.frame一样进行操作\n------------------------\n\n``` r\ndt[2,] #取第二行,所有列\n```\n\n ## x y z\n ## 1: 0.1305816 a -0.000746413\n\n``` r\ndt[dt$y==\"a\",] #取dt表中y列取值为\"a\"的所有行\n```\n\n ## x y z\n ## 1: 0.2215434 a -0.036959138\n ## 2: 0.1305816 a -0.000746413\n ## 3: -0.4680529 a -0.416177115\n\n和data.frame的区别\n------------------\n\n``` r\ndt[c(2,3)] #data.table中索引没有逗号,默认是按行取,这里取出的是第2,3行\n```\n\n ## x y z\n ## 1: 0.1305816 a -0.000746413\n ## 2: -0.4680529 a -0.416177115\n\n``` r\ndf[c(2,3)] #data.frame没有逗号默认是按列取得,这里取出了第2,3列\n```\n\n ## y z\n ## 1 a 1.5705973\n ## 2 a -1.0604976\n ## 3 a -0.6694775\n ## 4 b -0.8039125\n ## 5 b 1.2905721\n ## 6 b 2.0818057\n ## 7 c 1.5427769\n ## 8 c -1.5365087\n ## 9 c -0.1860371\n\n取子集操作\n----------\n\n1. data.table包用来取子集的函数与R中常用数据类型有些不同。\n2. 它采用:逗号+expression 的表达方式。\n3. 
一个 expression 是指包含在一对花括号里的一系列语句。\n\n### 例1\n\n设想你想要求出x这一列的均值,z这一列的总和:\n\n``` r\ndt[,list(mean(x),sum(z))]\n```\n\n ## V1 V2\n ## 1: 0.3370235 -1.361172\n\n### 例2\n\n设想你想要对y变量各种值出现的频次做个统计\n\n``` r\ndt[,table(y)]\n```\n\n ## y\n ## a b c \n ## 3 3 3\n\n### 例3\n\n设想你想生成新的一列,新列是z那一列的平方\n\n``` r\ndt[,w:=z^2]\n```\n\n这里用 data.table,而不是用 data.frame 的好处就体现出来了,data.table 是直接在原来的表里添加新的一列,而 data.frame 要重新生成一个表,再把新列加进去,所以当数据集很大时,非常占内存,速度也会下降。\n\n### 例4\n\n``` r\ndt2 <- dt\ndt[,y:=2]\nhead(dt,3)\n```\n\n ## x y z w\n ## 1: 0.2215434 2 -0.036959138 1.365978e-03\n ## 2: 0.1305816 2 -0.000746413 5.571323e-07\n ## 3: -0.4680529 2 -0.416177115 1.732034e-01\n\n``` r\nhead(dt2,3)\n```\n\n ## x y z w\n ## 1: 0.2215434 2 -0.036959138 1.365978e-03\n ## 2: 0.1305816 2 -0.000746413 5.571323e-07\n ## 3: -0.4680529 2 -0.416177115 1.732034e-01\n\n这里,我们把dt赋值给了dt2,然后修改了dt的值,发现dt2的值也被修改了。也就是说dt和dt2在内存中占用的是同一个地方,并没有做到真正的复制。这里我们需要用 copy 函数来进行真正的复制。\n\n``` r\ndt3 <- copy(dt)\ndt[,y:=3]\nhead(dt,n=3)\n```\n\n ## x y z w\n ## 1: 0.2215434 3 -0.036959138 1.365978e-03\n ## 2: 0.1305816 3 -0.000746413 5.571323e-07\n ## 3: -0.4680529 3 -0.416177115 1.732034e-01\n\n``` r\nhead(dt3,n=3)\n```\n\n ## x y z w\n ## 1: 0.2215434 2 -0.036959138 1.365978e-03\n ## 2: 0.1305816 2 -0.000746413 5.571323e-07\n ## 3: -0.4680529 2 -0.416177115 1.732034e-01\n\n多条expression的情形\n--------------------\n\n这里我们创建一个叫做 m 的新列,这个表达式包含两个语句,放在一个花括号里,语句之间用分号隔开,最后的返回值就是新列的值。\n\n``` r\ndt[,m:={tmp=x+z;log(abs(tmp)+2)}]\nhead(dt,n=3)\n```\n\n ## x y z w m\n ## 1: 0.2215434 3 -0.036959138 1.365978e-03 0.7814255\n ## 2: 0.1305816 3 -0.000746413 5.571323e-07 0.7560446\n ## 3: -0.4680529 3 -0.416177115 1.732034e-01 1.0592580\n\n逻辑值的情形\n------------\n\n在对数据进行分组时,常常依据一些判断条件,例如下面对 a 的正负性进行分组。\n\n``` r\ndt[,a:=x>0]\nhead(dt)\n```\n\n ## x y z w m a\n ## 1: 0.22154342 3 -0.036959138 1.365978e-03 0.7814255 TRUE\n ## 2: 0.13058158 3 -0.000746413 5.571323e-07 0.7560446 TRUE\n ## 3: -0.46805293 3 -0.416177115 1.732034e-01 1.0592580 FALSE\n ## 4: -0.04218963 3 0.006306522 3.977222e-05 0.7109297 FALSE\n ## 5: 0.83961783 3 0.417750685 1.745156e-01 1.1809197 TRUE\n ## 6: -1.00831444 3 -1.070324894 1.145595e+00 1.4057634 FALSE\n\n分组之后就可以求出不同组的一些特征,例如\n\n``` r\ndt[,b:=mean(x+w),by=a]\nhead(dt)\n```\n\n ## x y z w m a b\n ## 1: 0.22154342 3 -0.036959138 1.365978e-03 0.7814255 TRUE 1.29951613\n ## 2: 0.13058158 3 -0.000746413 5.571323e-07 0.7560446 TRUE 1.29951613\n ## 3: -0.46805293 3 -0.416177115 1.732034e-01 1.0592580 FALSE -0.06657282\n ## 4: -0.04218963 3 0.006306522 3.977222e-05 0.7109297 FALSE -0.06657282\n ## 5: 0.83961783 3 0.417750685 1.745156e-01 1.1809197 TRUE 1.29951613\n ## 6: -1.00831444 3 -1.070324894 1.145595e+00 1.4057634 FALSE -0.06657282\n\n``` r\ndt[,.N,by=a]\n```\n\n ## a N\n ## 1: TRUE 6\n ## 2: FALSE 3\n" }, { "alpha_fraction": 0.6823647022247314, "alphanum_fraction": 0.7004008293151855, "avg_line_length": 18.959999084472656, "blob_id": "e6157b4104f4fc1c17d1878ab6e12cca781421ad", "content_id": "5b19a6d0de5052b91b76d7fae7015a38cd2558f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1654, "license_type": "no_license", "max_line_length": 158, "num_lines": 50, "path": "/R/读取数据/R下载文件.md", "repo_name": "newbiejasper/newbie_programming", "src_encoding": "UTF-8", "text": "用R语言进行文件下载\n================\nnewbiejasper\n2017年3月24日\n\n1. 
检查文件路径是否存在\n-----------------------\n\n- 在开始下载文件之前,你首先应该知道自己工作目录,可以通过下面的代码打印出自己的working directory.\n\n``` r\ngetwd()\n```\n\n- 为了从网上下载文件,我们一般都会将文件保存在我们的工作目录下,或者在工作目录下新建一个文件夹,用于存放下载的数据文件,所以我们需要先确保你要给数据文件的命名不能和其他文件重复。\n\n``` r\nfile.exists(\"data\")\n```\n\n ## [1] FALSE\n\n- **FALSE**意味着没有叫做data的文件,所以我们可以很放心的新建叫做data的文件夹。\n\n``` r\ndir.create(\"data\")\n```\n\n2. 开始下载文件\n---------------\n\n下载文件,我们需要用到的是downlaod.file()命令,它的主要参数如下:\n\\* url:文件下载链接的字符串。\n\\* destfile:目标文件,决定存放你下载的文件的位置。\n\\* method:下载文件的方法,注意在mac上遇到以https开头的链接要采用curl的method\n\n3. 具体例子\n-----------\n\n这里我们以[Baltimore网站](https://data.baltimorecity.gov/Transportation/Baltimore-Fixed-Speed-Cameras/dz54-2aru)为例,在Export下有很多可供选择的下载格式CSV,Json,Xml等,右击复制下载链接,复制给url。\n\n``` r\nurl = \"https://data.baltimorecity.gov/api/views/dz54-2aru/rows.csv?accessType=DOWNLOAD\"\ndownload.file(url,destfile = \"./data/camera.csv\",method='curl')\nlist.files(\"./data\")\n```\n\n ## [1] \"camera.csv\"\n\n这里出现camera.csv表示data文件夹下已经成功下载了文件。\n" }, { "alpha_fraction": 0.5947473645210266, "alphanum_fraction": 0.6200132966041565, "avg_line_length": 33.574710845947266, "blob_id": "ddfd8c6e096aec5197c45f6aca94bf062dd90294", "content_id": "67a6af45b50fd85b3eb7c42a0a997ce85cc3ca4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3910, "license_type": "no_license", "max_line_length": 201, "num_lines": 87, "path": "/R/读取数据/R读取文件之_2_.md", "repo_name": "newbiejasper/newbie_programming", "src_encoding": "UTF-8", "text": "R读取文件之(2)\n================\nnewbiejasper\n2017/3/27\n\nR读取JSON文件\n=============\n\nJSON格式简介\n------------\n\nJSON(JavaScript Object Notation)是JavaScript的对象表示法。它类似于XML语言,是用于存储和交换数据的语法,它的特点是比XML更小更快,也就更容易解析。JSON语法是JavaScript语法的子集。数据在名称/值对中,数据由逗号分隔,花括号保存对象,方括号保存数组;名称/值对包括字段名称,后面写一个冒号,然后是值。数据被存储为数字,字符串,逻辑值,数组,对象。\n\n下面以我从github的API获得的自己的信息为例:\n\n``` json\n{\n \"login\": \"newbiejasper\",\n \"id\": 11181943,\n \"avatar_url\": \"https://avatars1.githubusercontent.com/u/11181943?v=3\",\n \"gravatar_id\": \"\",\n \"url\": \"https://api.github.com/users/newbiejasper\",\n \"html_url\": \"https://github.com/newbiejasper\",\n \"followers_url\": \"https://api.github.com/users/newbiejasper/followers\",\n \"following_url\": \"https://api.github.com/users/newbiejasper/following{/other_user}\",\n \"gists_url\": \"https://api.github.com/users/newbiejasper/gists{/gist_id}\",\n \"starred_url\": \"https://api.github.com/users/newbiejasper/starred{/owner}{/repo}\",\n \"subscriptions_url\": \"https://api.github.com/users/newbiejasper/subscriptions\",\n \"organizations_url\": \"https://api.github.com/users/newbiejasper/orgs\",\n \"repos_url\": \"https://api.github.com/users/newbiejasper/repos\",\n \"events_url\": \"https://api.github.com/users/newbiejasper/events{/privacy}\",\n \"received_events_url\": \"https://api.github.com/users/newbiejasper/received_events\",\n \"type\": \"User\",\n \"site_admin\": false,\n \"name\": null,\n \"company\": null,\n \"blog\": null,\n \"location\": null,\n \"email\": null,\n \"hireable\": null,\n \"bio\": null,\n \"public_repos\": 2,\n \"public_gists\": 0,\n \"followers\": 0,\n \"following\": 0,\n \"created_at\": \"2015-02-24T19:33:54Z\",\n \"updated_at\": \"2017-03-07T23:28:56Z\"\n}\n```\n\n整个对象写在一个大花括号里,\"login\"是名称,\"newbiejasper\"是它对应的值,形成一个名称/值对,中间用冒号隔开。和下一个名称/值对中间用逗号隔开。有时候,对一个名称对应可能有很多值,这个时候,为了保持数据的对应性,就把这个名称对应的所有值放到同一个数组中,用中括号括起来,就表示一个数组。\n\n读取文件\n--------\n\n``` r\nlibrary(jsonlite)\njsonData <- 
fromJSON(\"https://api.github.com/users/newbiejasper\")\nnames(jsonData)\n```\n\n ## [1] \"login\" \"id\" \"avatar_url\" \n ## [4] \"gravatar_id\" \"url\" \"html_url\" \n ## [7] \"followers_url\" \"following_url\" \"gists_url\" \n ## [10] \"starred_url\" \"subscriptions_url\" \"organizations_url\" \n ## [13] \"repos_url\" \"events_url\" \"received_events_url\"\n ## [16] \"type\" \"site_admin\" \"name\" \n ## [19] \"company\" \"blog\" \"location\" \n ## [22] \"email\" \"hireable\" \"bio\" \n ## [25] \"public_repos\" \"public_gists\" \"followers\" \n ## [28] \"following\" \"created_at\" \"updated_at\"\n\n这里,我们先载入jsonlite包,然后把JSON文件的URL传给fromJSON函数,这个函数会返回一个列表,包含JSON文件的各个组件,这里我们用names函数,把列表组件的名称取出来,下面我们可以试验一下它究竟是不是列表?\n\n``` r\njsonData$url\n```\n\n ## [1] \"https://api.github.com/users/newbiejasper\"\n\n我们在这里用列表取组件的美元$符号直接拿出了url的对应值,所以jsonData确实是一个列表。\n\n我们有时还会用到的就是把R语言的data.frame转换成JSON格式使用,以R自带的iris数据集为例:\n\n``` r\ntoJSON(iris,pretty = TRUE)\n```\n" }, { "alpha_fraction": 0.5304042100906372, "alphanum_fraction": 0.5521968603134155, "avg_line_length": 26.62135887145996, "blob_id": "4e30f48a82ef7ba6d709002339711ff37c6601ec", "content_id": "499ad0177bcef8880df381b5a12a2f9e21d11089", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3617, "license_type": "no_license", "max_line_length": 248, "num_lines": 103, "path": "/R/读取数据/从_MySQL_读取数据.md", "repo_name": "newbiejasper/newbie_programming", "src_encoding": "UTF-8", "text": "从 mysql 读取数据\n================\nnewbiejasper\n2017/4/9\n\nMySQL是什么东西?\n=================\n\n1. 免费的且广泛使用的开源数据库软件\n2. 广泛应用于基于网络的应用\n3. 数据被存储在数据库(data.table)中,数据库中存在着很多的表(table),表包含列(column)和行(row),列也叫做字段(fields),行也叫做记录(record),列一般都是作为变量名的,例如姓名,性别,id等。\n\n需要的R语言包\n=============\n\n``` r\nlibrary(\"RMySQL\")\n```\n\n ## Loading required package: DBI\n\n本文连接的数据库\n================\n\n[一个具体的面向网页的MySQL数据库](http://genome.ucsc.edu/goldenPath/help/mysql.html)。这里告诉我们 连接它数据库的方式为:\nmysql --user=genome --host=genome-mysql.soe.ucsc.edu -A\n\n开始使用 R语言进行连接\n======================\n\n``` r\nucscdb <- dbConnect(MySQL(),user=\"genome\",host=\"genome-mysql.soe.ucsc.edu\")\nresult <- dbGetQuery(ucscdb,\"SHOW DATABASES;\")\ndbDisconnect(ucscdb)\n```\n\n ## [1] TRUE\n\n这里,给出一些具体说明:\n\\* MySQL( )代表的是dbConnect函数连接的数据库类型,可以是oracle,mysql等\n\\* user 是用户名,host 是数据库的网络位置,如果是本地的,需要的参数是 password\n\\* dbGetQuery是数据库查询命令,\"SHOW DATABASES;\"是数据库查询命令,遵循的是Mysql语法和R 语言没有关系。\n\\* 查询结束之后注意断开连接,会返回一个 TRUE 值\n\\* result 里包含了这个host下所有的数据库 database\n\n下面我们进入某一个数据库\n------------------------\n\n``` r\nhg19 <- dbConnect(MySQL(),user=\"genome\",host=\"genome-mysql.soe.ucsc.edu\",db=\"hg19\")\nall_tables <- dbListTables(hg19)\nlength(all_tables)\n```\n\n ## [1] 11048\n\n运行结束之后,我们知道hg19这个数据库里有11048张表。\n\n获取表的信息\n------------\n\n我想知道某一个表有多少列?\n\n``` r\ndbListFields(hg19,\"acemblyPep\")\n```\n\n ## [1] \"name\" \"seq\"\n\n然后,我又想把知道这个表有多少行?\n\n``` r\ndbGetQuery(hg19,\"select count(*) from acemblyPep\")\n```\n\n ## count(*)\n ## 1 187692\n\n我想能不能看看这个表具体长啥样?\n\n``` r\ntable <- dbReadTable(hg19,\"acemblyPep\")\nhead(table,n=3)\n```\n\n ## name\n ## 1 A1BGAS.aAug10\n ## 2 A1BGAS.bAug10\n ## 3 A1BGAS.cAug10\n ## seq\n ## 1 LRRRRAAPAAFTPRTSAPHVTPAETAPVRLLFPPPPAPGTQTPGGLTPQQEKDHEHGHDGRAHSGAVSVWVMDPRTCSRRRR\n ## 2 MAGTQTPGGLTPQQEKDHEHGHDGRAHSGAVSVWVMDPRTCSRRRR\n ## 3 
MGDAGAVRRSRGDQELRRLQSDCRAHRRDEDQLGTAAALASAVRDGELWRLPLFGPISILLSTCCMLSVLLRASTWMEAVCSGWTGGECILCRRDNQVLRPEVITRPGALRQGLVARPEEQSSGCAQNSEVRPLNSDRTFQPIGNEAAQAARGLVKSEVCRDGAVILCFLWQSQHQPRCTLLLASLGSPALRVVAASCKYPALRFCNIHFCSLSLAKPAQSVPNLYPLCLKYLVWFLFP\n\n记得查询完之后\n\n``` r\ndbDisconnect(hg19)\n```\n\n ## [1] TRUE\n\n[RMySQL文档](https://cran.r-project.org/web/packages/RMySQL/RMySQL.pdf) [SQL查询命令](http://www.pantz.org/software/mysql/mysqlcommands.html) [其他参考资料](https://www.r-bloggers.com/mysql-and-r/)\n" }, { "alpha_fraction": 0.7033708095550537, "alphanum_fraction": 0.71685391664505, "avg_line_length": 36.08333206176758, "blob_id": "8d4fe10428f7243588d3fe0889ae7a48ce616f74", "content_id": "c9fab871e14f2211cff04972d61af918a4d8997b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 984, "license_type": "no_license", "max_line_length": 109, "num_lines": 24, "path": "/python/ml/chapter2/datingClassTest.py", "repo_name": "newbiejasper/newbie_programming", "src_encoding": "UTF-8", "text": "'''\n测试kNN分类算法的分类效果\n输入: filename:原始数据文件名\n m:特征数\n hoRatio:数据集中百分之多少用来作Test\n输出:分类错误率\n'''\nimport txtfile2matrix\nimport normalization\nimport kNN\n\ndef datingClassTest(filename,m,hoRatio):\n datingDataMat,classLabelVector = txtfile2matrix.file2matrix(filename,m)\n datingLabels = txtfile2matrix.char2int(classLabelVector)\n normMat,ranges,minVals = normalization.autoNorm(datingDataMat)\n m = normMat.shape[0] #样本个数\n numTestVecs = int(m*hoRatio)\n errorCount = 0.0\n for i in range(numTestVecs):\n classifierResult = kNN.classify0(normMat[i,:],normMat[numTestVecs:m,:],datingLabels[numTestVecs:m],3)\n print(\"the classifier came back with:%d,the real answer is:%d\" %(classifierResult,datingLabels[i]))\n if(classifierResult != datingLabels[i]):\n errorCount += 1.0\n print(\"the total error rate is:%f\" %(errorCount/float(numTestVecs)))\n" } ]
16
samy19980109/KasaSafe
https://github.com/samy19980109/KasaSafe
e2c54360ea1b2442c84d5ca4bc176a2211c70f53
2b00528548423e10da7e5a70c79140247ad01269
a1b0ee51e75920d7a5036f5dbc867b1a8ffdd21c
refs/heads/master
2020-06-26T05:07:21.552344
2019-07-30T00:29:12
2019-07-30T00:29:12
199,541,904
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.47807228565216064, "alphanum_fraction": 0.4954216778278351, "avg_line_length": 29.323530197143555, "blob_id": "28703ce63124dd5b22e891408132b2f72a493e47", "content_id": "c89667cd60782f2ad57f0ff53c11572ac798b5ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2075, "license_type": "no_license", "max_line_length": 152, "num_lines": 68, "path": "/main.py", "repo_name": "samy19980109/KasaSafe", "src_encoding": "UTF-8", "text": "from drowsy_driver import drowsiness_detector\nfrom routing import routing\nfrom send_texts import send_texts \nfrom threading import Thread\nimport os\n\nif __name__ == '__main__':\n stat1 = False\n stat2 = False\n stat3 = False\n\n #user_num = input('What is your phone number?')\n #emerg_num = input(\"What is your emergency's contact number?\")\n user_num = '+14168583844'\n emerg_num = '+14168583844'\n def status_1():\n stat1 = True\n\n #routing\n route = routing()\n places = route.possible_places()\n choice = route.chooser()\n start = route.loc\n end = choice\n instructions = route.routes(start, end)\n\n def status_2():\n stat2 = True\n \n #routing\n route = routing()\n places = route.possible_places()\n choice = route.chooser()\n start = route.loc\n end = choice\n instructions = route.routes(start, end)\n\n text = send_texts(user_num, emerg_num)\n text.send_text()\n print('------------------------------------------------------------SENT TEXT------------------------------------------------------------------')\n \n\n drowsy = drowsiness_detector()\n def start(drowsy):\n drowsy.vs_loop()\n\n vs = Thread(target = start, args = (drowsy, ))\n vs.start()\n print('Thread started with Status: ', drowsy.STATUS)\n\n while True:\n if (drowsy.STATUS == True) and (drowsy.STATUS_yawn == True):\n t = Thread(target = status_2)\n t.daemon = True\n t.start()\n break\n if (drowsy.STATUS == True ) and (drowsy.STATUS_yawn == False):\n t = Thread(target= status_2)\n t.daemon = True\n t.start()\n print('---------------STAT 1 ACTIVATED #1 ---------------')\n break\n if (drowsy.STATUS == False) and (drowsy.STATUS_yawn == True):\n t = Thread(target= status_1)\n t.daemon = True\n t.start()\n print('---------------STAT 1 ACTIVATED #2 ---------------')\n break\n\n\n\n \n\n" }, { "alpha_fraction": 0.5287846326828003, "alphanum_fraction": 0.5405117273330688, "avg_line_length": 36, "blob_id": "2a5f097ac24d7ab868b918b89d3d347809f68177", "content_id": "22452e3c0282fb14dbe10dc2aafd36031a4ed78b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 938, "license_type": "no_license", "max_line_length": 113, "num_lines": 25, "path": "/send_texts.py", "repo_name": "samy19980109/KasaSafe", "src_encoding": "UTF-8", "text": "from twilio.rest import Client\nclass send_texts:\n \n def __init__(self, destNumber, emergContact = None):\n # Account SID and Token are hidden for privacy\n self.accountSID = '*****************************'\n self.accounTOKEN = '*****************************'\n\n self.tw = Client(self.accountSID, self.accounTOKEN)\n self.twNumber = '+12898143339'\n self.destNumber = str(destNumber)\n self.emergContact = str(emergContact)\n\n\n def send_text(self):\n message = self.tw.messages.create(\n body = 'Hey! 
You seem a bit drowsy, Please wake up!', \n from_ = self.twNumber, \n to = self.destNumber\n )\n message = self.tw.messages.create(\n body = 'You are the emergery contact of ' + self.destNumber + ' Please make sure they drive safe !', \n from_ = self.twNumber, \n to = self.emergContact\n )\n\n \n\n\n\n" }, { "alpha_fraction": 0.5111455917358398, "alphanum_fraction": 0.534847617149353, "avg_line_length": 37.93406677246094, "blob_id": "349bf4e66847eb50e3a466852e61833d88fdd5f7", "content_id": "4383f6c8bf0015b363678a85255476c1873961e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7088, "license_type": "no_license", "max_line_length": 146, "num_lines": 182, "path": "/drowsy_driver.py", "repo_name": "samy19980109/KasaSafe", "src_encoding": "UTF-8", "text": "#importing the necessary packages\nfrom scipy.spatial import distance as dist\nfrom imutils.video import VideoStream\nfrom imutils import face_utils\nfrom threading import Thread\nimport numpy as np\nimport playsound\nimport argparse\nimport imutils\nimport time\nimport dlib\nimport cv2\n\nclass drowsiness_detector:\n def __init__(self, shape_predictor = None, alarm = None, webcam = None):\n \n #shape_predictor = self.shape_predictor\n #alarm = self.alarm\n #webcam = self.webcam\n\n self.shape_predictor = 'shape_predictor_68_face_landmarks.dat'\n self.alarm = 'alarm.wav'\n self.webcam = 'http://10.24.201.216:4747/mjpegfeed?640x480'\n\n # define two constants, one for the eye aspect ratio to indicate\n # blink and then a second constant for the number of consecutive\n # frames the eye must be below the threshold for to set off the\n # alarm\n self.EYE_AR_THRESH = 0.3\n self.EYE_AR_CONSEC_FRAMES = 20\n\n self.MOUTH_AR_THRESH = 30\n self.MOUTH_AR_CONSEC_FRAMES = 15\n \n # initialize the frame counter as well as a boolean used to\n # indicate if the alarm is going off\n self.COUNTER = 0\n self.COUNTER_yawn = 0\n\n self.ALARM_ON = False\n\n self.detector = dlib.get_frontal_face_detector()\n self.predictor = dlib.shape_predictor(self.shape_predictor) \n\n self.STATUS = False\n self.STATUS_yawn = False\n\n\n\n # grab the indexes of the facial landmarks for the left and\n # right eye, respectively\n (self.lStart, self.lEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"left_eye\"]\n (self.rStart, self.rEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"right_eye\"]\n\n (self.Start, self.End) = face_utils.FACIAL_LANDMARKS_IDXS[\"mouth\"]\n\n def mouth_aspect_ratio(self, mouth):\n #vertical dist (x-y)\n A = dist.euclidean(mouth[1], mouth[5])\n B = dist.euclidean(mouth[2], mouth[4])\n\n C = dist.euclidean(mouth[0], mouth[3])\n d = dist.euclidean(mouth[3], mouth[9])\n self.ma_ratio = (A + B) / (2.0 * C)\n return d\n\n def eye_aspect_ratio(self, eye):\n #vertical dist (x-y)\n A = dist.euclidean(eye[1], eye[5])\n B = dist.euclidean(eye[2], eye[4])\n\n C = dist.euclidean(eye[0], eye[3])\n\n self.ratio = (A + B) / (2.0 * C)\n return self.ratio\n \n def sound_alarm(self, path):\n \t# play an alarm sound\n\t playsound.playsound(path)\n\n def vs_loop(self):\n # start the video stream thread\n print(\"[INFO] starting video stream thread...\")\n self.vs = VideoStream(src='http://100.65.196.43:4747/mjpegfeed?640x480').start()\n #print(vs.isOpened())\n time.sleep(1.0)\n\n while True:\n # grab the frame from the threaded video file stream, resize\n # it, and convert it to grayscale\n # channels)\n frame = self.vs.read()\n frame = imutils.resize(frame, width=450)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # detect 
faces in the grayscale frame\n self.rects = self.detector(gray, 0)\n\n # loop over the face detections\n for rect in self.rects:\n # determine the facial landmarks for the face region, then\n # convert the facial landmark (x, y)-coordinates to a NumPy\n # array\n shape = self.predictor(gray, rect)\n shape = face_utils.shape_to_np(shape)\n\n # extract the left and right eye coordinates, then use the\n # coordinates to compute the eye aspect ratio for both eyes\n leftEye = shape[self.lStart:self.lEnd]\n rightEye = shape[self.rStart:self.rEnd]\n leftEAR = self.eye_aspect_ratio(leftEye)\n rightEAR = self.eye_aspect_ratio(rightEye)\n\n # average the eye aspect ratio together for both eyes\n ear = (leftEAR + rightEAR) / 2.0\n\n # compute the convex hull for the left and right eye, then\n # visualize each of the eyes\n leftEyeHull = cv2.convexHull(leftEye)\n rightEyeHull = cv2.convexHull(rightEye)\n cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)\n cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)\n\n # check to see if the eye aspect ratio is below the blink\n # threshold, and if so, increment the blink frame counter\n if ear < self.EYE_AR_THRESH:\n self.COUNTER += 1\n\n # if the eyes were closed for a sufficient number of\n # then sound the alarm\n if self.COUNTER >= self.EYE_AR_CONSEC_FRAMES:\n self.STATUS = True\n # draw an alarm on the frame\n cv2.putText(frame, \"DROWSINESS ALERT!\", (10, 30),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n #################################################################### ADD M U S I C HERE ###############################\n else:\n self.STATUS = False\n # otherwise, the eye aspect ratio is not below the blink\n # threshold, so reset the counter and alarm\n else:\n self.COUNTER = 0\n ALARM_ON = False\n\n # draw the computed eye aspect ratio on the frame to help\n # with debugging and setting the correct eye aspect ratio\n # thresholds and frame counters\n cv2.putText(frame, \"EAR: {:.2f}\".format(ear), (300, 30),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n \n mouth = shape[self.Start:self.End]\n MAR = self.mouth_aspect_ratio(mouth)\n\n mouthHull = cv2.convexHull(mouth)\n cv2.drawContours(frame, [mouthHull], -1, (0, 255, 0), 1) #what do these numbers mean??\n\n if MAR > self.MOUTH_AR_THRESH:\n self.COUNTER_yawn += 1\n\n # if the eyes were closed for a sufficient number of\n # then sound the alarm\n if self.COUNTER_yawn >= self.MOUTH_AR_CONSEC_FRAMES:\n self.STATUS_yawn = True\n else:\n self.STATUS_yawn = False\n else:\n self.COUNTER_yawn = 0\n ALARM_ON = False\n \n cv2.putText(frame, \"MAR: {:.2f}\".format(MAR), (30, 300),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n\n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1) & 0xFF\n # show the frame\n\n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1) & 0xFF\n \n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n\n\n" }, { "alpha_fraction": 0.4686397612094879, "alphanum_fraction": 0.4801686406135559, "avg_line_length": 39.87676239013672, "blob_id": "3189ccdd84b98cf20abbb2cc055fa910bd930bf6", "content_id": "4846cfd4219b5cfef7ec4ea27e35a51416fd93f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11623, "license_type": "no_license", "max_line_length": 170, "num_lines": 284, "path": "/routing.py", "repo_name": "samy19980109/KasaSafe", "src_encoding": "UTF-8", "text": "import requests\nimport googlemaps\nfrom datetime import datetime\nfrom json.decoder import JSONDecodeError\nimport 
json\nfrom random import *\n\nclass routing:\n def __init__(self):\n self.lat = 0\n self.lat = 0\n self.possible = []\n\n response = requests.post(url=\"https://www.googleapis.com/geolocation/v1/geolocate?key=<API_KEY>\")\n\n for key in response.json():\n if key == \"location\":\n for key2 in response.json()[key]:\n if key2 == \"lat\":\n self.lat = response.json()[key][key2]\n if key2 == \"lng\":\n self.long = response.json()[key][key2]\n self.loc = (self.lat, self.long)\n\n def possible_places(self):\n response = requests.get(\n \"https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=\" + str(self.lat) +\",\"+ str(self.long) +\"&type=cafe&radius=500&key=<API_KEY>\")\n # print(response.status_code)\n # print(\"\")\n # print(\"\")\n cafe = response.json()\n # print(json.dumps(response.json(), sort_keys=True, indent=4))\n\n for key1 in cafe:\n if key1 == \"results\":\n # print(key1)\n for item in cafe[key1]:\n # print(isinstance(item, dict))\n for key in item:\n if key == \"name\":\n self.possible.append(item[key])\n\n response = requests.get(\n \"https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=\" + str(self.lat) +\",\"+ str(self.long) +\"&type=restaurant&radius=500&key=<API_KEY>\")\n # print(response.status_code)\n # print(\"\")\n # print(\"\")\n restaurants = response.json()\n # print(json.dumps(response.json(), sort_keys=True, indent=4))\n\n for key1 in restaurants:\n if key1 == \"results\":\n # print(key1)\n for item in restaurants[key1]:\n # print(isinstance(item, dict))\n for key in item:\n if key == \"name\":\n self.possible.append(item[key])\n\n response = requests.get(\n \"https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=\" + str(self.lat) +\",\"+ str(self.long) +\"&type=bakery&radius=500&key=<API_KEY>\")\n # print(response.status_code)\n # print(\"\")\n # print(\"\")\n bakery = response.json()\n # print(json.dumps(response.json(), sort_keys=True, indent=4))\n\n for key1 in bakery:\n if key1 == \"results\":\n # print(key1)\n for item in bakery[key1]:\n # print(isinstance(item, dict))\n for key in item:\n if key == \"name\":\n self.possible.append(item[key])\n\n response = requests.get(\n \"https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=\" + str(self.lat) +\",\"+ str(self.long) +\"&type=bar&radius=500&key=<API_KEY>\")\n # print(response.status_code)\n # print(\"\")\n # print(\"\")\n bar = response.json()\n # print(json.dumps(response.json(), sort_keys=True, indent=4))\n\n for key1 in bar:\n if key1 == \"results\":\n # print(key1)\n for item in bar[key1]:\n # print(isinstance(item, dict))\n for key in item:\n if key == \"name\":\n self.possible.append(item[key])\n\n response = requests.get(\n \"https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=\" + str(self.lat) +\",\"+ str(self.long) +\"&type=parking&radius=500&key=<API_KEY>\")\n # print(response.status_code)\n # print(\"\")\n # print(\"\")\n parking = response.json()\n # print(json.dumps(response.json(), sort_keys=True, indent=4))\n\n for key1 in parking:\n if key1 == \"results\":\n # print(key1)\n for item in parking[key1]:\n # print(isinstance(item, dict))\n for key in item:\n if key == \"name\":\n self.possible.append(item[key])\n\n response = requests.get(\n \"https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=\" + str(self.lat) +\",\"+ str(self.long) +\"&type=taxi_stand&radius=500&key=<API_KEY>\")\n # print(response.status_code)\n # print(\"\")\n # print(\"\")\n taxi_stand = response.json()\n # 
print(json.dumps(response.json(), sort_keys=True, indent=4))\n\n for key1 in taxi_stand:\n if key1 == \"results\":\n # print(key1)\n for item in taxi_stand[key1]:\n # print(isinstance(item, dict))\n for key in item:\n if key == \"name\":\n self.possible.append(item[key])\n\n response = requests.get(\n \"https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=\" + str(self.lat) +\",\"+ str(self.long) +\"&type=gas_station&radius=1000&key=<API_KEY>\")\n # print(response.status_code)\n # print(\"\")\n # print(\"\")\n gas_station = response.json()\n # print(json.dumps(response.json(), sort_keys=True, indent=4))\n\n for key1 in gas_station:\n if key1 == \"results\":\n # print(key1)\n for item in gas_station[key1]:\n # print(isinstance(item, dict))\n for key in item:\n if key == \"name\":\n self.possible.append(item[key])\n\n response = requests.get(\n \"https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=\" + str(self.lat) +\",\"+ str(self.long) +\"&type=rv_park&radius=1000&key=<API_KEY>\")\n # print(response.status_code)\n # print(\"\")\n # print(\"\")\n rv_park = response.json()\n # print(json.dumps(response.json(), sort_keys=True, indent=4))\n\n for key1 in rv_park:\n if key1 == \"results\":\n # print(key1)\n for item in rv_park[key1]:\n # print(isinstance(item, dict))\n for key in item:\n if key == \"name\":\n self.possible.append(item[key])\n\n response = requests.get(\n \"https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=\" + str(self.lat) +\",\"+ str(self.long) +\"&type=lodging&radius=1000&key=<API_KEY>\")\n # print(response.status_code)\n # print(\"\")\n # print(\"\")\n lodging = response.json()\n # print(json.dumps(response.json(), sort_keys=True, indent=4))\n\n for key1 in lodging:\n if key1 == \"results\":\n # print(key1)\n for item in lodging[key1]:\n # print(isinstance(item, dict))\n for key in item:\n if key == \"name\":\n self.possible.append(item[key])\n\n response = requests.get(\n \"https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=\" + str(self.lat) +\",\"+ str(self.long) +\"&type=mosque&radius=1000&key=<API_KEY>\")\n # print(response.status_code)\n # print(\"\")\n # print(\"\")\n mosque = response.json()\n # print(json.dumps(response.json(), sort_keys=True, indent=4))\n\n for key1 in mosque:\n if key1 == \"results\":\n # print(key1)\n for item in mosque[key1]:\n # print(isinstance(item, dict))\n for key in item:\n if key == \"name\":\n self.possible.append(item[key])\n\n response = requests.get(\n \"https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=\" + str(self.lat) +\",\"+ str(self.long) +\"&type=hindu_temple&radius=1000&key=<API_KEY>\")\n # print(response.status_code)\n # print(\"\")\n # print(\"\")\n hindu_temple = response.json()\n # print(json.dumps(response.json(), sort_keys=True, indent=4))\n\n for key1 in hindu_temple:\n if key1 == \"results\":\n # print(key1)\n for item in hindu_temple[key1]:\n # print(isinstance(item, dict))\n for key in item:\n if key == \"name\":\n self.possible.append(item[key])\n\n response = requests.get(\n \"https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=\" + str(self.lat) +\",\"+ str(self.long) +\"&type=church&radius=1000&key=<API_KEY>\")\n # print(response.status_code)\n # print(\"\")\n # print(\"\")\n church = response.json()\n # print(json.dumps(response.json(), sort_keys=True, indent=4))\n\n for key1 in church:\n if key1 == \"results\":\n # print(key1)\n for item in church[key1]:\n # print(isinstance(item, dict))\n for key 
in item:\n if key == \"name\":\n self.possible.append(item[key])\n\n def chooser(self):\n ra_idx = randint(0, len(self.possible))\n\n return self.possible[ra_idx]\n \n\n def routes(self, origin, destination):\n gmaps = googlemaps.Client(key='<API_KEY>')\n\n # Geocoding an address\n geocode_result = gmaps.geocode('1600 Amphitheatre Parkway, Mountain View, CA')\n\n # Look up an address with reverse geocoding\n reverse_geocode_result = gmaps.reverse_geocode((self.lat, self.long))\n\n # Request directions via public transit\n now = datetime.now()\n directions_result = gmaps.directions(origin,\n destination,\n mode=\"driving\",\n departure_time=now)\n\n # print(json.dumps(directions_result, sort_keys=True, indent=4))\n\n for item in directions_result:\n for key in item:\n if key == \"legs\":\n for item1 in item[key]:\n for key2 in item1:\n if key2 == \"steps\":\n for item2 in item1[key2]:\n for key3 in item2:\n if key3 == \"html_instructions\":\n print(item2[key3])\n \n\"\"\" if __name__ == \"__main__\":\n #test = routing()\n #test.possible_places()\n #type(test.possible_places())\n #start = \"1426 Bishop Street, Montreal, Canada\"\n #end = \"Bahen Center for Information Technology, Toronto, Canada\"\n print(\"--------------------------------------------------1----------------------------------------------------------\")\n print(\"------------------------------------------------------------------------------------------------------------\")\n #test.routes(start, end)\n\n route = routing()\n places = route.possible_places()\n choice = route.chooser()\n print(choice)\n start = route.loc\n print(route.loc)\n end = choice\n print(\"---------------------------------------------------2---------------------------------------------------------\")\n print(\"------------------------------------------------------------------------------------------------------------\")\n instructions = route.routes(start, end) \"\"\"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.8056603670120239, "alphanum_fraction": 0.8132075667381287, "avg_line_length": 87.33333587646484, "blob_id": "696ba11109cb444b9ab2b2b6f99a7b4f98bb0221", "content_id": "be734f94105e43dbe25f10d278b530f7d757ca30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 530, "license_type": "no_license", "max_line_length": 132, "num_lines": 6, "path": "/README.md", "repo_name": "samy19980109/KasaSafe", "src_encoding": "UTF-8", "text": "# KasaSafe\nA Computer Vision model that uses regression trees to identify faces within miliseconds (Kazemi &amp; Sullivan 2014) \nsubsequently plotting pins to the face to identify characteristics. 
\nFrom there using Eye Aspect Ratios (EAR) and Mouth distances, I identify eyelids closing and yawns signifying sleepiness.\nUsing a Drowsiness Alert, it then determines your status to either send you a routing options to the closest rest stop or sends you \nrouting options and sends a text message to you and to an Emergency Contact.\n" }, { "alpha_fraction": 0.5277711749076843, "alphanum_fraction": 0.5542312264442444, "avg_line_length": 34.25210189819336, "blob_id": "e2bb494532351e0d043af753946a9786d70a6f91", "content_id": "63dbc9fe49375ae99dba3384ff2aea47ad4b3eab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4195, "license_type": "no_license", "max_line_length": 102, "num_lines": 119, "path": "/yawn_detection.py", "repo_name": "samy19980109/KasaSafe", "src_encoding": "UTF-8", "text": "from scipy.spatial import distance as dist\nfrom imutils.video import VideoStream\nfrom imutils import face_utils\nfrom threading import Thread\nimport numpy as np\nimport playsound\nimport argparse\nimport imutils\nimport time\nimport dlib\nimport cv2\n\nclass yawn_detection:\n def __init__(self, shape_predictor = None, alarm = None, webcam = None):\n \n #shape_predictor = self.shape_predictor\n #alarm = self.alarm\n #webcam = self.webcam\n\n self.shape_predictor = 'shape_predictor_68_face_landmarks.dat'\n self.alarm = 'alarm.wav'\n self.webcam = 'http://10.24.201.216:4747/mjpegfeed?640x480'\n\n # define two constants, one for the eye aspect ratio to indicate\n # blink and then a second constant for the number of consecutive\n # frames the eye must be below the threshold for to set off the\n # alarm\n self.MOUTH_AR_THRESH = 0.65\n self.MOUTH_AR_CONSEC_FRAMES = 15\n \n # initialize the frame counter as well as a boolean used to\n # indicate if the alarm is going off\n self.COUNTER = 0\n self.ALARM_ON = False\n\n self.detector = dlib.get_frontal_face_detector()\n self.predictor = dlib.shape_predictor(self.shape_predictor) \n\n self.STATUS = False\n\n # grab the indexes of the facial landmarks for the mouth\n (self.Start, self.End) = face_utils.FACIAL_LANDMARKS_IDXS[\"mouth\"]\n \n def mouth_aspect_ratio(self, mouth):\n #vertical dist (x-y)\n A = dist.euclidean(mouth[1], mouth[5])\n B = dist.euclidean(mouth[2], mouth[4])\n\n C = dist.euclidean(mouth[0], mouth[3])\n\n self.ratio = (A + B) / (2.0 * C)\n return self.ratio\n \n def sound_alarm(self, path):\n # play an alarm sound\n\t playsound.playsound(path)\n\n def vs_loop(self):\n # start the video stream thread\n print(\"[INFO] starting video stream thread...\")\n self.vs = VideoStream(src='http://10.24.201.216:4747/mjpegfeed?640x480').start()\n \n #print(self.vs.isOpened())\n time.sleep(1.0)\n\n while True:\n # grab the frame from the threaded video file stream, resize\n # it, and convert it to grayscale\n # channels)\n frame = self.vs.read()\n frame = imutils.resize(frame, width=450)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # detect faces in the grayscale frame\n self.rects = self.detector(gray, 0)\n\n # loop over the face detections\n for rect in self.rects:\n # determine the facial landmarks for the face region, then\n # convert the facial landmark (x, y)-coordinates to a NumPy\n # array\n shape = self.predictor(gray, rect)\n shape = face_utils.shape_to_np(shape)\n\n mouth = shape[self.Start:self.End]\n MAR = self.mouth_aspect_ratio(mouth)\n\n mouthHull = cv2.convexHull(mouth)\n cv2.drawContours(frame, [mouthHull], -1, (0, 255, 0), 1) #what do these numbers mean??\n\n if MAR < 
self.MOUTH_AR_THRESH:\n self.COUNTER += 1\n\n # if the eyes were closed for a sufficient number of\n # then sound the alarm\n if self.COUNTER >= self.MOUTH_AR_CONSEC_FRAMES:\n self.STATUS = True\n # draw an alarm on the frame\n cv2.putText(frame, \"DROWSINESS ALERT!\", (10, 30),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n else:\n self.STATUS = False\n else:\n self.COUNTER = 0\n ALARM_ON = False\n \n cv2.putText(frame, \"MAR: {:.2f}\".format(MAR), (300, 30),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n\n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1) & 0xFF\n \n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n\n\nyawn = yawn_detection()\nyawn.vs_loop()\n" } ]
6
panghanwu/matplotlib_demo
https://github.com/panghanwu/matplotlib_demo
ef760c4ac30f4cf34ee61cbb9a40893c0452743d
bd070771b6e95461c8903044dcdfc5376b8ebe1c
e11919dfbf4bc58710f9d1ab72e54cc55c75067f
refs/heads/master
2023-06-13T21:56:32.460681
2021-06-17T07:40:41
2021-06-17T07:40:41
340,633,415
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.537870466709137, "alphanum_fraction": 0.5883644223213196, "avg_line_length": 19.727272033691406, "blob_id": "13763821f07c4dd18d9d6270536ae9b046d8362b", "content_id": "02515f5faed162070d8582c99c86ae312d900aa3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 911, "license_type": "no_license", "max_line_length": 83, "num_lines": 44, "path": "/06_annotation.py", "repo_name": "panghanwu/matplotlib_demo", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.linspace(-3, 3, 50)\ny = 2*x + 1\n\nx0 = 1\ny0 = 2*x0 + 1\n\nplt.figure()\n\n# set axes to (0, 0)\nax = plt.gca()\nax.spines['right'].set_color('none')\nax.spines['top'].set_color('none')\nax.yaxis.set_ticks_position('left')\nax.spines['left'].set_position(('data',0))\nax.xaxis.set_ticks_position('bottom')\nax.spines['bottom'].set_position(('data',0))\n\n# plot\nplt.plot(x, y)\nplt.scatter(x0, y0, s=50, color='b')\nplt.plot([x0,x0], [y0,0], 'k--', lw=2.5)\n\n# add annotation\n# method 1\nplt.annotate(\n r'$2x+1={}$'.format(y0), \n xy = (x0,y0), \n xycoords = 'data', \n xytext = (+30,-30), \n textcoords ='offset points',\n fontsize = 16,\n arrowprops = dict(\n arrowstyle = '->',\n connectionstyle = 'arc3,rad=.2'\n )\n)\n\n# method 2 (by add text)\nplt.text(-2.5, 3, r'$\\alpha\\ is\\ the\\ first.$', fontdict={'size':16,'color':'red'})\n\nplt.show()" }, { "alpha_fraction": 0.5950919985771179, "alphanum_fraction": 0.6462167501449585, "avg_line_length": 22.33333396911621, "blob_id": "21f7286caea232382886a13d8747ef9a26fdd1d9", "content_id": "496472b01f187a17f7c900cde5a06ddb74026a15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 489, "license_type": "no_license", "max_line_length": 73, "num_lines": 21, "path": "/12_3d.py", "repo_name": "panghanwu/matplotlib_demo", "src_encoding": "UTF-8", "text": "from mpl_toolkits.mplot3d import Axes3D # 3D-axis module\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nx = np.arange(-4, 4, 0.25)\ny = np.arange(-4, 4, 0.25)\n\nx, y = np.meshgrid(x, y)\nz = np.sin(np.sqrt(x**2 + y**2))\n\n# create figure\nfig = plt.figure(figsize=(6,6))\nax = Axes3D(fig) # plot 3D-axis\n\n# plot on \"ax\"\nax.plot_surface(x, y, z, rstride=1, cstride=1, cmap='rainbow')\nax.contourf(x, y, z, zdir='z', offset=-1.5, cmap='rainbow') # projection\nax.set_zlim(-2, 2)\n\nplt.show()" }, { "alpha_fraction": 0.5916666388511658, "alphanum_fraction": 0.6499999761581421, "avg_line_length": 14.125, "blob_id": "01ab3b5a9886d9533ceeaa78c6bf34ea7c6fb0eb", "content_id": "e695d6aa5e456828043f79fb1b1f558edb89aedd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 120, "license_type": "no_license", "max_line_length": 31, "num_lines": 8, "path": "/01_simple_demo.py", "repo_name": "panghanwu/matplotlib_demo", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.linspace(-1, 1, 50)\ny = 2*x**2 + 1\n\nplt.plot(x, y)\nplt.show()" }, { "alpha_fraction": 0.6913580298423767, "alphanum_fraction": 0.7119341492652893, "avg_line_length": 16.428571701049805, "blob_id": "4d8bf0bfbba3d1aa1ec3b18c5a43ca9350082d98", "content_id": "aeed2708ecdafd57d68b7cc0d527d7345335eabc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 243, "license_type": "no_license", "max_line_length": 72, "num_lines": 14, "path": "/11_image.py", 
"repo_name": "panghanwu/matplotlib_demo", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\n\npixels = np.arange(16).reshape(4, 4)\n\nplt.figure()\nplt.xticks(())\nplt.yticks(())\n\nplt.imshow(pixels, interpolation='nearest', cmap='bone', origin='upper')\nplt.colorbar(shrink=.8)\n\n\nplt.show()" }, { "alpha_fraction": 0.6010498404502869, "alphanum_fraction": 0.6246719360351562, "avg_line_length": 20.799999237060547, "blob_id": "605dd345d859c5593a00ff7fd47806ca0ecbf98a", "content_id": "d3c4c470262bcec0b2fa1aa3fef0abf548fde381", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 762, "license_type": "no_license", "max_line_length": 55, "num_lines": 35, "path": "/07_ticks.py", "repo_name": "panghanwu/matplotlib_demo", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.linspace(-3, 3, 50)\ny = 0.1*x\n\nplt.figure()\n\nplt.ylim(-2, 2)\n\nax = plt.gca()\n# set axes to (0, 0)\nax.spines['right'].set_color('none')\nax.spines['top'].set_color('none')\nax.spines['top'].set_color('none')\nax.xaxis.set_ticks_position('bottom')\nax.spines['bottom'].set_position(('data',0))\nax.yaxis.set_ticks_position('left')\nax.spines['left'].set_position(('data',0))\n\n# set label font-size\nfor label in ax.get_xticklabels()+ax.get_yticklabels():\n label.set_fontsize(12)\n label.set_bbox(\n dict(\n facecolor='white',\n edgecolor='none',\n alpha=.3 # transparency\n )\n )\n\n# zorder: set order for z-axis\nplt.plot(x, y, linewidth=10, zorder=1)\n\nplt.show()" }, { "alpha_fraction": 0.5296609997749329, "alphanum_fraction": 0.6398305296897888, "avg_line_length": 20.484848022460938, "blob_id": "47fc6f36cd586555865a18c3e51e1bcefe77e9ee", "content_id": "14bafd7227f11c143ecfa01e114e011b0a3d5d52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 708, "license_type": "no_license", "max_line_length": 78, "num_lines": 33, "path": "/14_subplot_2.py", "repo_name": "panghanwu/matplotlib_demo", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\n\n# method 1\nplt.figure(num=1)\n\nax1 = plt.subplot2grid((3,3), (0,0), colspan=3, rowspan=1)\nax1.plot([1,2], [1,2])\nax1.set_title('ax1')\n\nax2 = plt.subplot2grid((3,3), (1,0), colspan=2)\nax3 = plt.subplot2grid((3,3), (1,2), rowspan=2)\nax4 = plt.subplot2grid((3,3), (2,0))\nax5 = plt.subplot2grid((3,3), (2,1))\n\n\n# method 2\nplt.figure(num=2)\ngs = gridspec.GridSpec(3, 3)\n\nax1 = plt.subplot(gs[0,:])\nax2 = plt.subplot(gs[1,:2])\nax3 = plt.subplot(gs[1:,2])\nax4 = plt.subplot(gs[-1,0])\nax5 = plt.subplot(gs[-1,-2])\n\n\n# method 3\nfig, ((ax11,ax12), (ax21,ax22)) = plt.subplots(2, 2, sharex=True, sharey=True)\nax11.scatter([1,2], [1,2])\n\n\nplt.show()" }, { "alpha_fraction": 0.5694863796234131, "alphanum_fraction": 0.6178247928619385, "avg_line_length": 23.518518447875977, "blob_id": "d96a6885c155db57522fd836b110a6600dbea690", "content_id": "481b97a030a6b99d7544321a27e3119d8ec9ff70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 662, "license_type": "no_license", "max_line_length": 67, "num_lines": 27, "path": "/09_bar.py", "repo_name": "panghanwu/matplotlib_demo", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\n\nn = 12\nx = np.arange(n)\ny_upper = (1-x/n) * np.random.uniform(0.5, 1.0, n)\ny_downer = (1-x/n) * np.random.uniform(0.5, 1.0, 
n)\n\n# set axes\nplt.xlim(-1, n)\nplt.ylim(-1.25, 1.25)\nplt.xticks(())\nplt.yticks(())\n\nplt.bar(x, +y_upper, facecolor='#9999ff', edgecolor='white')\nplt.bar(x, -y_downer, facecolor='#ff9999', edgecolor='white')\n\n# add annotation\nfor i, j in zip(x, y_upper):\n plt.text(i, j, '{:.2f}'.format(j), ha='center', va='bottom')\n # ha: horizontal alignment, va: vertical alignment\n\nfor i, j in zip(x, y_downer):\n plt.text(i, -j-0.02, '{:.2f}'.format(j), ha='center', va='top')\n\n\nplt.show()\n" }, { "alpha_fraction": 0.5654450058937073, "alphanum_fraction": 0.6204188466072083, "avg_line_length": 16.409090042114258, "blob_id": "c80365b188688215cb76c8556c00c610615b87c1", "content_id": "5df0910028dad2a0219359c8476112ed94014286", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 382, "license_type": "no_license", "max_line_length": 78, "num_lines": 22, "path": "/05_legend.py", "repo_name": "panghanwu/matplotlib_demo", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.linspace(-3, 3, 50)\n\ny1 = 2*x + 1\ny2 = x**2 - 4\n\nplt.figure()\n\nl1, = plt.plot(x, y1, color='red', linewidth=1., linestyle='--', label='Line')\nl2, = plt.plot(x, y2, label='Curve')\n\n# add legend\nplt.legend(handles=[l1,l2,], loc='best')\n\nplt.xlim((0,3))\nplt.ylim((-4,6))\nplt.xlabel('X Label')\nplt.ylabel('Y Label')\n\nplt.show()" }, { "alpha_fraction": 0.5241228342056274, "alphanum_fraction": 0.5833333134651184, "avg_line_length": 15.925926208496094, "blob_id": "4e31f47159a24ff773fc8f56b85dd937ad7f81ad", "content_id": "823b5be0a0c5b8661aaf4825985680f8738996a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 456, "license_type": "no_license", "max_line_length": 68, "num_lines": 27, "path": "/03_axis_1.py", "repo_name": "panghanwu/matplotlib_demo", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.linspace(-3, 3, 50)\n\ny1 = 2*x + 1\ny2 = x**2 - 4\n\nplt.figure()\nplt.plot(x, y1, color='red', linewidth=1., linestyle='--')\nplt.plot(x, y2)\n\nplt.xlim((0,2))\nplt.ylim((-4,6))\nplt.xlabel('X Label')\nplt.ylabel('Y Label')\n\n# ticks\nx_ticks = np.linspace(-2, 3, 11)\nplt.xticks(x_ticks)\nplt.yticks(\n [-1., 1.5, 4., 5.5],\n ['Low' ,'Normal', 'High', r'$So\\ high$'] # r'$str$': latex math\n)\n\n\nplt.show()" }, { "alpha_fraction": 0.537177562713623, "alphanum_fraction": 0.6206373572349548, "avg_line_length": 20.29032325744629, "blob_id": "bac0123848c31ce6fb37ab21d30fc565c078e0fd", "content_id": "adc32507ea0ab786b6d3951eb94e4b4f26534755", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 659, "license_type": "no_license", "max_line_length": 51, "num_lines": 31, "path": "/15_insert.py", "repo_name": "panghanwu/matplotlib_demo", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\n\nfig = plt.figure()\n\nx = [1, 2, 3, 4, 5, 6]\ny = [1, 6, 2, 5, 3, 4]\n\n# plot main figure\nleft, bottom, width, height = 0.1, 0.1, 0.8, 0.8\nax1 = fig.add_axes([left, bottom, width, height])\nax1.plot(x, y, 'r')\nax1.set_xlabel('X')\nax1.set_ylabel('Y')\nax1.set_title('Title')\n\n# plot insert\nleft, bottom, width, height = 0.2, 0.58, 0.25, 0.25\nax2 = fig.add_axes([left, bottom, width, height])\nax2.plot(x, y, 'b')\nax2.set_xlabel('X')\nax2.set_ylabel('Y')\nax2.set_title('Insert 1')\n\n# another method for insert\nplt.axes([0.6, 0.2, 0.25, 0.25])\nplt.plot(x, y, 
'g')\nax2.set_xlabel('X')\nax2.set_ylabel('Y')\nplt.title('Insert 2')\n\nplt.show()" }, { "alpha_fraction": 0.6156583428382874, "alphanum_fraction": 0.6494662165641785, "avg_line_length": 18.413793563842773, "blob_id": "3069d0424849d5eaf64e3ff902a9199d7cc7ce7d", "content_id": "92b2c057b19215e38a443efd5d98b6d0d65ebd3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 562, "license_type": "no_license", "max_line_length": 58, "num_lines": 29, "path": "/04_axis_2.py", "repo_name": "panghanwu/matplotlib_demo", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.linspace(-3, 3, 50)\n\ny1 = 2*x + 1\ny2 = x**2 - 4\n\nplt.figure()\nplt.plot(x, y1, color='red', linewidth=1., linestyle='--')\nplt.plot(x, y2)\n\nplt.xlim((0,3))\nplt.ylim((-4,6))\nplt.xlabel('X Label')\nplt.ylabel('Y Label')\n\n# gca: get current axis\nax = plt.gca()\nax.spines['right'].set_color('none')\nax.spines['top'].set_color('none')\n\nax.yaxis.set_ticks_position('left')\nax.spines['left'].set_position(('data',1))\nax.xaxis.set_ticks_position('bottom')\nax.spines['bottom'].set_position(('data',-1))\n\n\nplt.show()" }, { "alpha_fraction": 0.5722891688346863, "alphanum_fraction": 0.6385542154312134, "avg_line_length": 18.58823585510254, "blob_id": "3bd23d383e45a3b72ff38f89fbf93fcc076380c5", "content_id": "58e889eef252f7a5aa6c03a695e883e0310269f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 332, "license_type": "no_license", "max_line_length": 54, "num_lines": 17, "path": "/08_scatter.py", "repo_name": "panghanwu/matplotlib_demo", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\n\nn = 1024\nx = np.random.normal(0, 1, n)\ny = np.random.normal(0, 1, n)\nc = np.arctan2(x, y) # generate color value\n\nplt.figure(figsize=(6,6))\nplt.xlim((-1.5,1.5))\nplt.ylim((-1.5,1.5))\nplt.xticks(())\nplt.yticks(())\n\nplt.scatter(x, y, s=75, c=c, cmap='rainbow', alpha=.5)\n\nplt.show()" }, { "alpha_fraction": 0.6100323796272278, "alphanum_fraction": 0.6294498443603516, "avg_line_length": 19.633333206176758, "blob_id": "12a67445b769e5bbc41350ba855df7a4a2fdf6dd", "content_id": "03503169074ed05c3bf017d5cdd185bbbc231b53", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 618, "license_type": "no_license", "max_line_length": 55, "num_lines": 30, "path": "/17_animation.py", "repo_name": "panghanwu/matplotlib_demo", "src_encoding": "UTF-8", "text": "from matplotlib import animation\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfig, ax = plt.subplots()\n\nx = np.arange(0, 2*np.pi, 0.01)\nline, = ax.plot(x, np.sin(x)) # suffix \",\" means tuple\n\ndef action(t):\n # update values of y at t\n line.set_ydata(np.sin(x+t/50))\n return line,\n\ndef init():\n # update values of y at t\n line.set_ydata(np.sin(x))\n return line,\n\n\nanm = animation.FuncAnimation(\n fig = fig, \n func = action, \n frames = 500, # total frames\n init_func = init, # the first frame\n interval = 20, # per ms\n blit = True # only update changed pixels\n)\n\nplt.show()" }, { "alpha_fraction": 0.5679442286491394, "alphanum_fraction": 0.6306620240211487, "avg_line_length": 14.1578950881958, "blob_id": "81db645800ddafeec9b1e48f4f9f21c8bba958b7", "content_id": "dadec5598c4d939fe32776b900572a77efb626d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 287, "license_type": 
"no_license", "max_line_length": 58, "num_lines": 19, "path": "/02_figures.py", "repo_name": "panghanwu/matplotlib_demo", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.linspace(-3, 3, 50)\n\ny1 = 2*x + 1\ny2 = x**2 - 4\n\nplt.figure()\nplt.plot(x, y1)\n\nplt.figure()\nplt.plot(x, y2)\n\nplt.figure(num=6, figsize=(6,6))\nplt.plot(x, y1, color='red', linewidth=1., linestyle='--')\nplt.plot(x, y2)\n\nplt.show()" }, { "alpha_fraction": 0.605042040348053, "alphanum_fraction": 0.6420168280601501, "avg_line_length": 21.884614944458008, "blob_id": "9eb9ab8c4a70f2063f4e57a4db0648319a67b178", "content_id": "4873cdaa71c2986e2cfbaa7a0bcee9aa550dba06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 595, "license_type": "no_license", "max_line_length": 95, "num_lines": 26, "path": "/10_contours.py", "repo_name": "panghanwu/matplotlib_demo", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\n\ndef height(x, y):\n return (1 - x/2 + x**5 + y**3) * np.exp(-x**2 - y**2)\n\nn = 256\nx = np.linspace(-3, 3, n)\ny = np.linspace(-3, 3, n)\n\nx_grid, y_grid = np.meshgrid(x, y)\n\n# create figure\nplt.figure(figsize=(6,6))\nplt.xticks(())\nplt.yticks(())\n\n# contourf: filled contour \nplt.contourf(x_grid, y_grid, height(x_grid, y_grid), 8, alpha=.75, cmap='hot')\n# plot contour lines\nct_line = plt.contour(x_grid, y_grid, height(x_grid, y_grid), 8, colors='black', linewidths=.7)\n\n# add labels\nplt.clabel(ct_line, inline=True, fontsize=10)\n\nplt.show()\n" } ]
15
mfkiwl/BPSK-Transceiver
https://github.com/mfkiwl/BPSK-Transceiver
ff7cc7f35c04c888eaf5198a5cbf33f319a9a787
440ffb01ead58c278a1b327246718d5a35c68afd
0ed5a1a1715f5223c877cfdea2085b8ce54ea5dd
refs/heads/master
2023-05-14T07:43:52.080215
2021-06-03T23:06:25
2021-06-03T23:06:25
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6827537417411804, "alphanum_fraction": 0.6870120763778687, "avg_line_length": 26.096153259277344, "blob_id": "b3cc18fae70841f105f782f4ffd4ac83ba29f46c", "content_id": "96133579ac99e1d8de7b4224874d371c5cce3c92", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1409, "license_type": "permissive", "max_line_length": 76, "num_lines": 52, "path": "/boards/ip/iprepo/inspector_v1_0/drivers/inspector_v1_0/src/inspector_sinit.c", "repo_name": "mfkiwl/BPSK-Transceiver", "src_encoding": "UTF-8", "text": "/**\n* @file inspector_sinit.c\n*\n* The implementation of the inspector driver's static initialzation\n* functionality.\n*\n* @note\n*\n* None\n*\n*/\n#ifndef __linux__\n#include \"xstatus.h\"\n#include \"xparameters.h\"\n#include \"inspector.h\"\nextern inspector_Config inspector_ConfigTable[];\n/**\n* Lookup the device configuration based on the unique device ID. The table\n* ConfigTable contains the configuration info for each device in the system.\n*\n* @param DeviceId is the device identifier to lookup.\n*\n* @return\n* - A pointer of data type inspector_Config which\n* points to the device configuration if DeviceID is found.\n* - NULL if DeviceID is not found.\n*\n* @note None.\n*\n*/\ninspector_Config *inspector_LookupConfig(u16 DeviceId) {\n inspector_Config *ConfigPtr = NULL;\n int Index;\n for (Index = 0; Index < XPAR_INSPECTOR_NUM_INSTANCES; Index++) {\n if (inspector_ConfigTable[Index].DeviceId == DeviceId) {\n ConfigPtr = &inspector_ConfigTable[Index];\n break;\n }\n }\n return ConfigPtr;\n}\nint inspector_Initialize(inspector *InstancePtr, u16 DeviceId) {\n inspector_Config *ConfigPtr;\n Xil_AssertNonvoid(InstancePtr != NULL);\n ConfigPtr = inspector_LookupConfig(DeviceId);\n if (ConfigPtr == NULL) {\n InstancePtr->IsReady = 0;\n return (XST_DEVICE_NOT_FOUND);\n }\n return inspector_CfgInitialize(InstancePtr, ConfigPtr);\n}\n#endif\n" }, { "alpha_fraction": 0.45139291882514954, "alphanum_fraction": 0.4615496098995209, "avg_line_length": 31.356807708740234, "blob_id": "438ef42568313f5fa84ed228c9d35b6830dd8a30", "content_id": "7b1b52b3a160d7b3ee54563640d48aa3eb2f5b2f", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6892, "license_type": "permissive", "max_line_length": 89, "num_lines": 213, "path": "/rfsoc_radio/sdr_plots.py", "repo_name": "mfkiwl/BPSK-Transceiver", "src_encoding": "UTF-8", "text": "__author__ = \"David Northcote\"\n__organisation__ = \"The Univeristy of Strathclyde\"\n__support__ = \"https://github.com/strath-sdr/rfsoc_radio\"\n\nimport numpy as np\nimport plotly.graph_objs as go\nimport ipywidgets as ipw\nfrom contextlib import contextmanager\n\n \nclass SpectrumAnalyser():\n def __init__(self,\n data,\n fs,\n animation_period=50,\n width=600,\n height=400,\n autosize=True):\n \"\"\"Create a new Spectrum Analyser object for plotting frequency against power.\"\"\"\n \n self._fs = fs\n self._data = self._data = [{'y' : self._fft_psd(data, self._fs),\n 'name' : 'IQ Freq (dB)'}]\n self._animation_period = animation_period\n self._width = width\n self._height = height\n self._yaxisrange = [-200, -50]\n lim = int(self._fs/2)\n self._xaxisrange = [-lim, lim - (self._fs/len(data))]\n self._autosize = autosize\n self._complex = isinstance(data[0], complex)\n \n self._layout = {\n 'height' : self._height,\n 'width' : self._width,\n 'autosize' : True,\n 'xaxis' : {\n 'range' : self._xaxisrange,\n 'title' : 
'Frequency (Hz)',\n },\n 'yaxis' : {\n 'range' : self._yaxisrange,\n 'title' : 'Amplitude (dB)',\n },\n 'title' : 'Frequency Spectrum'\n }\n \n self._plot = go.FigureWidget(\n layout = self._layout,\n data = self._data\n )\n \n self._plot.layout.xaxis.range = self._xaxisrange\n self._plot.data[0].x = [x for x in np.arange(-lim, lim, (self._fs/len(data)))]\n \n def _fft_psd(self, data, fs):\n fft = np.fft.fftshift(np.fft.fft(data))\n mag = np.array([abs(y)**2/(fs*len(fft)) for y in fft])\n psd = 10 * np.where(mag>0, np.log10(mag), 0)\n return psd\n \n def set_frequency(self, fs):\n self._fs = fs\n \n def update_data(self, data):\n lim = int(self._fs/2)\n self._xaxisrange = [-lim, lim - (self._fs/len(data))]\n self._plot.layout.xaxis.range = self._xaxisrange\n self._data = self._fft_psd(data, self._fs)\n self._plot.data[0].y = self._data\n self._plot.data[0].x = [x for x in np.arange(-lim, lim, (self._fs/len(data)))]\n \n def get_widget(self):\n return self._plot \n \n\nclass TimePlot():\n def __init__(self,\n data,\n animation_period=50,\n width=600,\n height=400,\n autosize=True):\n \"\"\"Create a new plot object for plotting data against time.\"\"\"\n \n self._data = np.empty(len(data))\n self._animation_period = animation_period\n self._width = width\n self._height = height\n self._yaxisrange = [-0.3, 0.3]\n self._xaxisrange = [-0.5, len(data)-0.5]\n self._autosize = autosize\n self._complex = isinstance(data[0], complex)\n \n if self._complex:\n self._data = [{'y' : np.real(data),\n 'name' : 'Real Signal'},\n {'y' : np.imag(data),\n 'name' : 'Imag Signal'}]\n else:\n self._data = [{'y' : data, 'name' : 'Time Signal'}]\n \n self._layout = {\n 'height' : self._height,\n 'width' : self._width,\n 'autosize' : True,\n 'xaxis' : {\n 'range' : self._xaxisrange,\n 'title' : 'Samples',\n },\n 'yaxis' : {\n 'range' : self._yaxisrange,\n 'title' : 'Amplitude',\n },\n 'title' : 'Time Domain Signal'\n }\n \n self._plot = go.FigureWidget(\n layout = self._layout,\n data = self._data\n )\n \n def set_axisrange(self, axisrange):\n self._yaxisrange = axisrange\n self._plot.layout.yaxis.range = axisrange\n \n def update_data(self, data):\n \"\"\"Update the frame of data currently on the canvas\n \"\"\"\n self._xaxisrange = len(data)\n self._plot.layout.xaxis.range = [-0.5, self._xaxisrange-0.5]\n \n if self._complex:\n self._data = [{'y' : np.real(data)},\n {'y' : np.imag(data)}]\n else:\n self._data = [{'y' : data}]\n\n self._plot.data[0].y = self._data[0].get('y')\n if self._complex:\n self._plot.data[1].y = self._data[1].get('y')\n \n def get_widget(self):\n return self._plot\n\n \nclass ConstellationPlot():\n def __init__(self,\n data,\n animation_period=50,\n height=400,\n width=600,\n autosize=True):\n \"\"\"Creates a new plot object for plotting IQ constellations.\"\"\"\n \n self._data = data\n self._animation_period = 50\n self._width = width\n self._height = height\n self._axisrange = [-0.3, 0.3]\n self._autosize = autosize\n self._complex = isinstance(self._data[0], complex)\n \n if not self._complex:\n raise Exception('Input data is not of type complex.')\n \n self._layout = {\n 'hovermode' : 'closest',\n 'height' : self._height,\n 'width' : self._width,\n 'autosize' : self._autosize,\n 'xaxis' : {\n 'range' : self._axisrange,\n 'title' : 'In-phase Amplitude'\n },\n 'yaxis' : {\n 'range' : self._axisrange,\n 'title' : 'Quadrature Amplitude'\n },\n 'title' : 'Constellation Plot'\n }\n \n self._plot = go.FigureWidget(\n layout = self._layout,\n data = [{\n 'mode' : 'markers',\n 'x' : 
np.real(self._data),\n 'y' : np.imag(self._data)\n }])\n \n def set_axisrange(self, axisrange):\n self._axisrange = axisrange\n self._plot.layout.yaxis.range = axisrange\n self._plot.layout.xaxis.range = axisrange\n \n def update_data(self, data):\n \"\"\"Update the frame of data currently on the canvas\n \"\"\"\n \n if not isinstance(data[0], complex):\n raise Exception('Input data is not of type complex.')\n \n if len(data) > 8:\n step = int(len(data)/8)\n self._data = data[::step]\n else:\n self._data = data\n \n self._plot.data[0].update({'x' : np.real(self._data),\n 'y' : np.imag(self._data)}) \n \n def get_widget(self):\n return self._plot\n" }, { "alpha_fraction": 0.711442768573761, "alphanum_fraction": 0.7402985095977783, "avg_line_length": 49.25, "blob_id": "b664581ea38c27d16a8bca85af6ea73c2ecc53ec", "content_id": "aacadf9c0036028c20a3ee68dc9a97680ff0bd92", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1005, "license_type": "permissive", "max_line_length": 78, "num_lines": 20, "path": "/boards/ip/iprepo/bpsk_receiver_v1_0/drivers/bpsk_receiver_v1_0/src/bpsk_receiver_hw.h", "repo_name": "mfkiwl/BPSK-Transceiver", "src_encoding": "UTF-8", "text": "/**\n*\n* @file bpsk_receiver_hw.h\n*\n* This header file contains identifiers and driver functions (or\n* macros) that can be used to access the device. The user should refer to the\n* hardware device specification for more details of the device operation.\n*/ \n#define BPSK_RECEIVER_THRESHOLD 0xc/**< threshold */\n#define BPSK_RECEIVER_RESET_TIME_SYNC 0x0/**< reset_time_sync */\n#define BPSK_RECEIVER_RESET_PHASE_SYNC 0x4/**< reset_phase_sync */\n#define BPSK_RECEIVER_RESET_FRAME_SYNC 0x8/**< reset_frame_sync */\n#define BPSK_RECEIVER_OBSERVATION_POINT 0x24/**< observation_point */\n#define BPSK_RECEIVER_ENABLE_TRANSFER 0x1c/**< enable_transfer */\n#define BPSK_RECEIVER_COARSE_PASSTHROUGH 0x2c/**< coarse_passthrough */\n#define BPSK_RECEIVER_DATA_COUNT 0x20/**< data_count */\n#define BPSK_RECEIVER_FRAME_SIZE 0x10/**< frame_size */\n#define BPSK_RECEIVER_FREQ_OFFSET 0x28/**< freq_offset */\n#define BPSK_RECEIVER_PACKET_COUNT 0x18/**< packet_count */\n#define BPSK_RECEIVER_PACKET_SIZE 0x14/**< packet_size */\n" }, { "alpha_fraction": 0.7337461113929749, "alphanum_fraction": 0.7469040155410767, "avg_line_length": 31.299999237060547, "blob_id": "935b97b2213c7e1b40aa86ba0dd124efadc4366e", "content_id": "a254a0484f079a011af4a6f7b795b380f418b4cb", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1292, "license_type": "permissive", "max_line_length": 106, "num_lines": 40, "path": "/boards/ip/iprepo/rfsoc_transmitter_v1_0/drivers/rfsoc_transmitter_v1_0/src/rfsoc_transmitter.c", "repo_name": "mfkiwl/BPSK-Transceiver", "src_encoding": "UTF-8", "text": "#include \"rfsoc_transmitter.h\"\n#ifndef __linux__\nint rfsoc_transmitter_CfgInitialize(rfsoc_transmitter *InstancePtr, rfsoc_transmitter_Config *ConfigPtr) {\n Xil_AssertNonvoid(InstancePtr != NULL);\n Xil_AssertNonvoid(ConfigPtr != NULL);\n\n InstancePtr->rfsoc_transmitter_BaseAddress = ConfigPtr->rfsoc_transmitter_BaseAddress;\n\n InstancePtr->IsReady = 1;\n return XST_SUCCESS;\n}\n#endif\nvoid rfsoc_transmitter_enable_tx_write(rfsoc_transmitter *InstancePtr, u32 Data) {\n\n Xil_AssertVoid(InstancePtr != NULL);\n\n rfsoc_transmitter_WriteReg(InstancePtr->rfsoc_transmitter_BaseAddress, 4, Data);\n}\nu32 rfsoc_transmitter_enable_tx_read(rfsoc_transmitter 
*InstancePtr) {\n\n u32 Data;\n Xil_AssertVoid(InstancePtr != NULL);\n\n Data = rfsoc_transmitter_ReadReg(InstancePtr->rfsoc_transmitter_BaseAddress, 4);\n return Data;\n}\nvoid rfsoc_transmitter_enable_data_write(rfsoc_transmitter *InstancePtr, u32 Data) {\n\n Xil_AssertVoid(InstancePtr != NULL);\n\n rfsoc_transmitter_WriteReg(InstancePtr->rfsoc_transmitter_BaseAddress, 0, Data);\n}\nu32 rfsoc_transmitter_enable_data_read(rfsoc_transmitter *InstancePtr) {\n\n u32 Data;\n Xil_AssertVoid(InstancePtr != NULL);\n\n Data = rfsoc_transmitter_ReadReg(InstancePtr->rfsoc_transmitter_BaseAddress, 0);\n return Data;\n}\n" }, { "alpha_fraction": 0.710299015045166, "alphanum_fraction": 0.7269102931022644, "avg_line_length": 26.870370864868164, "blob_id": "b3337777bcf387f5b7d893085594933df8b90fd0", "content_id": "2b2983e9ac67517ce58e218fb5f57a6124587d71", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1505, "license_type": "permissive", "max_line_length": 82, "num_lines": 54, "path": "/boards/ip/iprepo/inspector_v1_0/drivers/inspector_v1_0/src/inspector.c", "repo_name": "mfkiwl/BPSK-Transceiver", "src_encoding": "UTF-8", "text": "#include \"inspector.h\"\n#ifndef __linux__\nint inspector_CfgInitialize(inspector *InstancePtr, inspector_Config *ConfigPtr) {\n Xil_AssertNonvoid(InstancePtr != NULL);\n Xil_AssertNonvoid(ConfigPtr != NULL);\n\n InstancePtr->inspector_BaseAddress = ConfigPtr->inspector_BaseAddress;\n\n InstancePtr->IsReady = 1;\n return XST_SUCCESS;\n}\n#endif\nvoid inspector_reset_dim_write(inspector *InstancePtr, u32 Data) {\n\n Xil_AssertVoid(InstancePtr != NULL);\n\n inspector_WriteReg(InstancePtr->inspector_BaseAddress, 0, Data);\n}\nu32 inspector_reset_dim_read(inspector *InstancePtr) {\n\n u32 Data;\n Xil_AssertVoid(InstancePtr != NULL);\n\n Data = inspector_ReadReg(InstancePtr->inspector_BaseAddress, 0);\n return Data;\n}\nvoid inspector_packetsize_dim_write(inspector *InstancePtr, u32 Data) {\n\n Xil_AssertVoid(InstancePtr != NULL);\n\n inspector_WriteReg(InstancePtr->inspector_BaseAddress, 8, Data);\n}\nu32 inspector_packetsize_dim_read(inspector *InstancePtr) {\n\n u32 Data;\n Xil_AssertVoid(InstancePtr != NULL);\n\n Data = inspector_ReadReg(InstancePtr->inspector_BaseAddress, 8);\n return Data;\n}\nvoid inspector_enable_dim_write(inspector *InstancePtr, u32 Data) {\n\n Xil_AssertVoid(InstancePtr != NULL);\n\n inspector_WriteReg(InstancePtr->inspector_BaseAddress, 4, Data);\n}\nu32 inspector_enable_dim_read(inspector *InstancePtr) {\n\n u32 Data;\n Xil_AssertVoid(InstancePtr != NULL);\n\n Data = inspector_ReadReg(InstancePtr->inspector_BaseAddress, 4);\n return Data;\n}\n" }, { "alpha_fraction": 0.5291697978973389, "alphanum_fraction": 0.5386437177658081, "avg_line_length": 38.517242431640625, "blob_id": "5fe53490622a9bfda93ab7244aa4e5e85ca35bca", "content_id": "9999399c61218c08edbfd70c1b0c7011784f1c55", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8022, "license_type": "permissive", "max_line_length": 137, "num_lines": 203, "path": "/boards/RFSoC2x2/rfsoc_radio/drivers/overlay.py", "repo_name": "mfkiwl/BPSK-Transceiver", "src_encoding": "UTF-8", "text": "__author__ = \"David Northcote\"\n__organisation__ = \"The Univeristy of Strathclyde\"\n__support__ = \"https://github.com/strath-sdr/rfsoc_radio\"\n\nfrom pynq import Overlay\nfrom pynq import allocate\nimport xrfclk\nimport xrfdc\nimport os\nimport numpy as np\nimport ipywidgets as 
ipw\nimport time\n\n# Import overlay specific drivers\nfrom .quick_widgets import Button, TransmitTerminal, ReceiveTerminal\nfrom .bpsk_receiver import BpskReceiver\nfrom .bpsk_transmitter import BpskTransmitter\nfrom .data_inspector import *\n\n\nclass BpskOverlay(Overlay):\n \n def __init__(self, bitfile_name=None, init_rf_clks=True, **kwargs):\n \n # Generate default bitfile name\n if bitfile_name is None:\n this_dir = os.path.dirname(__file__)\n bitfile_name = os.path.join(this_dir, 'bitstream', 'rfsoc_radio.bit')\n \n # Create Overlay\n super().__init__(bitfile_name, **kwargs)\n\n # Initialise I2C\n self.init_i2c()\n \n # Extract friendly dataconverter names\n self.rf = self.usp_rf_data_converter\n self.dac_tile = self.rf.dac_tiles[1]\n self.dac_block = self.dac_tile.blocks[0]\n self.adc_tile = self.rf.adc_tiles[2]\n self.adc_block = self.adc_tile.blocks[0]\n \n # Start up LMX clock\n if init_rf_clks:\n xrfclk.set_ref_clks()\n \n # Set DAC defaults\n self.dac_tile.DynamicPLLConfig(1, 409.6, 1024)\n self.dac_block.NyquistZone = 1\n self.dac_block.MixerSettings = {\n 'CoarseMixFreq' : xrfdc.COARSE_MIX_BYPASS,\n 'EventSource' : xrfdc.EVNT_SRC_IMMEDIATE,\n 'FineMixerScale' : xrfdc.MIXER_SCALE_1P0,\n 'Freq' : 64,\n 'MixerMode' : xrfdc.MIXER_MODE_C2R,\n 'MixerType' : xrfdc.MIXER_TYPE_FINE,\n 'PhaseOffset' : 0.0\n }\n self.dac_block.UpdateEvent(xrfdc.EVENT_MIXER)\n self.dac_tile.SetupFIFO(True)\n \n # Set ADC defaults\n self.adc_tile.DynamicPLLConfig(1, 409.6, 1024)\n self.adc_block.NyquistZone = 1\n self.adc_block.MixerSettings = {\n 'CoarseMixFreq' : xrfdc.COARSE_MIX_BYPASS,\n 'EventSource' : xrfdc.EVNT_SRC_TILE,\n 'FineMixerScale' : xrfdc.MIXER_SCALE_1P0,\n 'Freq' : 64,\n 'MixerMode' : xrfdc.MIXER_MODE_R2C,\n 'MixerType' : xrfdc.MIXER_TYPE_FINE,\n 'PhaseOffset' : 0.0\n }\n self.adc_block.UpdateEvent(xrfdc.EVENT_MIXER)\n self.adc_tile.SetupFIFO(True)\n \n # Obtain friendly names for IP Cores and associated drivers\n self.receiver = BpskReceiver(self.axi_dma_rx, self.bpsk_receiver, self.DataInspector) # The receiver is coupled with an inspector\n self.transmitter = BpskTransmitter(self.axi_dma_tx, self.bpsk_transmitter)\n \n # Receiver setup requirements - pull resets low\n self.receiver.controller.reset_time_sync = 0\n self.receiver.controller.reset_phase_sync = 0\n self.receiver.controller.reset_frame_sync = 0\n\n def init_i2c(self):\n \"\"\"Initialize the I2C control drivers on RFSoC2x2.\n This should happen after a bitstream is loaded since I2C reset\n is connected to PL pins. 
The I2C-related drivers are made loadable\n modules so they can be removed or inserted.\"\"\"\n module_list = ['i2c_dev', 'i2c_mux_pca954x', 'i2c_mux']\n for module in module_list:\n cmd = \"if lsmod | grep {0}; then rmmod {0}; fi\".format(module)\n ret = os.system(cmd)\n if ret:\n raise RuntimeError(\n 'Removing kernel module{} failed.'.format(module))\n \n module_list.reverse()\n for module in module_list:\n cmd = \"modprobe {}\".format(module)\n ret = os.system(cmd)\n if ret:\n raise RuntimeError(\n 'Inserting kernel module {} failed.'.format(module))\n \n def dashboard(self):\n \n def dashboard_callback(value, button_id = 0):\n if button_id == 0:\n self.transmitter.controller.enable_transmitter = int(value)\n elif button_id == 1:\n self.receiver.controller.coarse_passthrough = int(not value)\n elif button_id == 2:\n self.receiver.controller.reset_time_sync = int(not value)\n elif button_id == 3:\n self.receiver.controller.reset_phase_sync = int(not value)\n elif button_id == 4:\n self.receiver.controller.reset_frame_sync = int(not value)\n else:\n pass\n \n def adc_callback(change):\n self.adc_block.MixerSettings[\"Freq\"] = change['new']\n self.adc_block.UpdateEvent(xrfdc.EVENT_MIXER)\n freq_label.value = 'Reported Frequency Offset: ' + \\\n str(self.receiver.controller.freq_offset)\n \n def dac_callback(change):\n self.dac_block.MixerSettings[\"Freq\"] = change['new']\n self.dac_block.UpdateEvent(xrfdc.EVENT_MIXER)\n freq_label.value = 'Reported Frequency Offset: ' + \\\n str(self.receiver.controller.freq_offset)\n \n # Create button descriptions\n desc_b = ['Transmit Enable', \n 'Coarse Sync', \n 'Time Sync', \n 'Carrier Sync', \n 'Frame Sync']\n \n buttons = [None]*5\n widgets = [None]*5\n \n # Create buttons\n for i in range(5):\n buttons[i] = Button(description=desc_b[i],\n state=True,\n callback=dashboard_callback,\n button_id=i)\n \n # Create float text objects for entering the ADC & DAC mixer frequencies\n adc_fc = ipw.FloatText(\n value=64,\n description='ADC Frequency (MHz):',\n style={'description_width': 'initial'},\n disabled=False\n )\n dac_fc = ipw.FloatText(\n value=64,\n description='DAC Frequency (MHz):',\n style={'description_width': 'initial'},\n disabled=False\n )\n adc_fc.observe(adc_callback, names='value')\n dac_fc.observe(dac_callback, names='value')\n \n layout = ipw.Layout(display='inline-flex',\n justify_content='flex-start',\n align_items='flex-start',\n align_content='flex-start')\n \n freq_label = ipw.Label('Reported Frequency Offset: ' + \\\n str(self.receiver.controller.freq_offset) + \\\n ' Hz')\n\n button_container = ipw.VBox([ipw.HBox([buttons[1].get_widget(), buttons[2].get_widget()]),\n ipw.HBox([buttons[3].get_widget(), buttons[4].get_widget()])])\n\n dashboard = ipw.VBox(children=[\n ipw.HBox(children=[dac_fc,\n buttons[0].get_widget()],\n layout=layout),\n ipw.HBox(children=[adc_fc,\n button_container], \n layout=layout),\n freq_label\n ],\n layout=layout\n )\n\n dashboard_accordion = ipw.Accordion(children=[dashboard])\n dashboard_accordion.set_title(0, 'System Control')\n \n return dashboard_accordion\n\n def _bpsk_radio_generator(self):\n sidebar = ipw.VBox([self.dashboard(), self.receiver.visualise()])\n msgbar = ipw.VBox([self.transmitter.terminal(), self.receiver.terminal()])\n return ipw.HBox([sidebar, msgbar])\n\n def bpsk_radio_application(self):\n return self._bpsk_radio_generator()\n" }, { "alpha_fraction": 0.6541745662689209, "alphanum_fraction": 0.6584440469741821, "avg_line_length": 27.106666564941406, "blob_id": 
"91696c5b37442653b15e90656dd7e9fd734de6c1", "content_id": "4dc05d25470ced26fc3be0a2842fb16225fbc410", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2108, "license_type": "permissive", "max_line_length": 79, "num_lines": 75, "path": "/setup.py", "repo_name": "mfkiwl/BPSK-Transceiver", "src_encoding": "UTF-8", "text": "import os\nimport shutil\nfrom distutils.dir_util import copy_tree\n\nfrom setuptools import find_packages, setup\n\n# global variables\npackage_name = 'rfsoc_radio'\npip_name = 'rfsoc-radio'\nboard = os.environ['BOARD']\nrepo_board_folder = f'boards/{board}/{package_name}'\nboard_notebooks_dir = os.environ['PYNQ_JUPYTER_NOTEBOOKS']\nboard_project_dir = os.path.join(board_notebooks_dir, 'bpsk-demonstrator')\n\ndata_files = []\n\n\n# check whether board is supported\ndef check_env():\n if not os.path.isdir(repo_board_folder):\n raise ValueError(\"Board {} is not supported.\".format(board))\n if not os.path.isdir(board_notebooks_dir):\n raise ValueError(\n \"Directory {} does not exist.\".format(board_notebooks_dir))\n\n\n# copy overlays to python package\ndef copy_overlays():\n src_ol_dir = os.path.join(repo_board_folder, 'bitstream')\n dst_ol_dir = os.path.join(package_name, 'bitstream')\n copy_tree(src_ol_dir, dst_ol_dir)\n data_files.extend(\n [os.path.join(\"..\", dst_ol_dir, f) for f in os.listdir(dst_ol_dir)])\n\n\n# copy notebooks to jupyter home\ndef copy_notebooks():\n src_nb_dir = os.path.join(repo_board_folder, 'notebooks')\n dst_nb_dir = os.path.join(board_project_dir)\n if os.path.exists(dst_nb_dir):\n shutil.rmtree(dst_nb_dir)\n copy_tree(src_nb_dir, dst_nb_dir)\n\n\n# copy driver to python package\ndef copy_drivers():\n src_dr_dir = os.path.join(repo_board_folder, 'drivers')\n dst_dr_dir = os.path.join(package_name)\n copy_tree(src_dr_dir, dst_dr_dir)\n data_files.extend(\n [os.path.join(\"..\", dst_dr_dir, f) for f in os.listdir(dst_dr_dir)])\n\n\ncheck_env()\ncopy_overlays()\ncopy_drivers()\ncopy_notebooks()\n\n\nsetup(\n name=\"rfsoc_radio\",\n version='0.1.2',\n install_requires=[\n 'pynq==2.6',\n 'plotly==4.5.2',\n ],\n url='https://github.com/strath-sdr/rfsoc_radio.git',\n license='BSD 3-Clause License',\n author=\"David Northcote\",\n author_email=\"[email protected]\",\n packages=find_packages(),\n package_data={\n '': data_files,\n },\n description=\"PYNQ example of using the RFSoC as a BPSK radio transceiver.\")\n" }, { "alpha_fraction": 0.5289915204048157, "alphanum_fraction": 0.5324083566665649, "avg_line_length": 36.00382995605469, "blob_id": "843ac610011c4a5770165809363ff91533967ce4", "content_id": "f2e00f529bbc4edbf5234d18af75db6af6dfe451", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9658, "license_type": "permissive", "max_line_length": 85, "num_lines": 261, "path": "/rfsoc_radio/quick_widgets.py", "repo_name": "mfkiwl/BPSK-Transceiver", "src_encoding": "UTF-8", "text": "__author__ = \"David Northcote\"\n__organisation__ = \"The Univeristy of Strathclyde\"\n__support__ = \"https://github.com/strath-sdr/rfsoc_radio\"\n\nimport ipywidgets as ipw\n\n\nclass DropdownMenu():\n \"\"\"Creates a new drop-down widget.\n \"\"\"\n def __init__(self, options, name, init_value):\n super().__init__()\n \n self._dropdown_menu = ipw.Dropdown(\n options=options,\n value=init_value,\n description=name,\n style={'description_width': 'initial'},\n disabled=False\n )\n \n def get_widget(self):\n return 
self._dropdown_menu\n\n \nclass ImageViewer():\n \"\"\"Creates a new image viewer widget for display an image in bytes format.\n Useful for updating an image inline with Jupyter Labs.\n \"\"\"\n def __init__(self, description='Image Viewer', \n width='200px', height='200px'):\n super().__init__()\n \n # Create a label object to act as a description for the image\n self._title = ipw.Label(\n value=description)\n \n # Create the image viewer object to display an image\n self._image_viewer = ipw.Image(\n format='png',\n width=width,\n height=height)\n \n def update(self, image):\n if isinstance(image, (bytes,bytearray)):\n self._image_viewer.value = image\n else:\n raise ValueError('Image should be given in bytes.')\n \n def get_widget(self):\n return ipw.VBox([self._title, self._image_viewer])\n\n\nclass TransmitTerminal():\n \"\"\"Creates a new textbox area widget that should be interacted with by the user.\n The textbox area is primarily used to display text. Buttons are used to control\n whether the text is written to the textbox area through self._listening. A clear \n button is provided to clear the text from the textbox area.\n \"\"\"\n def __init__(self, description='Terminal', height='200px', width='400px'):\n super().__init__()\n\n # Create the text area object that acts as a terminal\n self._text_terminal = ipw.Textarea(\n value='',\n placeholder='',\n description='',\n layout={'height' : height, 'width' : width},\n disabled=False\n )\n\n # Create label for terminal description\n self._label_terminal = ipw.Label(\n value=description,\n style={'description_width': 'initial'},\n )\n \n self.callback = []\n \n # Create a start button to enable listening\n self._start_button = ipw.Button(description='Send',\n layout=ipw.Layout(margin='auto',\n border='solid white'))\n self._start_button.on_click(lambda _: self.start())\n self._start_button.style.button_color = 'lightgray'\n\n # Create a clear button for our text terminal\n self._clear_button = ipw.Button(description='Clear', \n layout=ipw.Layout(margin='auto',\n border='solid white'))\n self._clear_button.on_click(lambda _: self.clear())\n self._clear_button.style.button_color = 'lightgray'\n\n # Create accordion\n self._accordion = ipw.Accordion(children=[\n ipw.HBox([ipw.VBox([self._text_terminal]),\n ipw.VBox([self._start_button,\n self._clear_button],\n layout=ipw.Layout(align_self='flex-start'))\n ])\n ])\n self._accordion.set_title(0, description)\n \n def value(self):\n return self._text_terminal.value\n \n def clear(self):\n self._text_terminal.value = ''\n \n def start(self):\n self._start_button.style.button_color = 'lightgreen'\n for callback in self.callback:\n callback()\n self._start_button.style.button_color = 'lightgray'\n \n def get_widget(self):\n return self._accordion\n\n \nclass ReceiveTerminal():\n \"\"\"Creates a new textbox area widget that should not be interacted with by user.\n The textbox area is primarily used to display text. Buttons are used to control\n whether the text is written to the textbox area through self._listening. 
A clear \n button is provided to clear the text from the textbox area.\n \"\"\"\n def __init__(self, description='Terminal', height='200px', width='400px'):\n super().__init__()\n\n # Create the text area object that acts as a terminal\n self._text_terminal = ipw.Textarea(\n value='',\n placeholder='',\n description='',\n layout={'height' : height, 'width' : width},\n disabled=True\n )\n\n # Create label for terminal description\n self._label_terminal = ipw.Label(\n value=description,\n style={'description_width': 'initial'},\n )\n \n self._counter = 0\n self._auto_clear = True\n \n # Create a autoclear button for our text terminal\n self._auto_clear_button = ipw.Button(description='Auto Clear',\n layout=ipw.Layout(margin='auto',\n border='solid white'))\n self._auto_clear_button.on_click(lambda _: self.autoclear())\n self._auto_clear_button.style.button_color = 'lightblue'\n \n # Create a clear button for our text terminal\n self._clear_button = ipw.Button(description='Clear', \n layout=ipw.Layout(margin='auto',\n border='solid white'))\n self._clear_button.on_click(lambda _: self.clear())\n self._clear_button.style.button_color = 'lightgray'\n \n # Create a start button to enable listening\n self._start_button = ipw.Button(description=u'\\u25B6', \n layout=ipw.Layout(margin='auto',\n border='solid white'))\n self._start_button.on_click(lambda _: self.start())\n self._start_button.style.button_color = 'lightgray'\n \n # Create a stop button to disable listening\n self._stop_button = ipw.Button(description=u'\\u25A0', \n layout=ipw.Layout(margin='auto',\n border='solid white'))\n self._stop_button.on_click(lambda _: self.stop())\n self._stop_button.style.button_color = 'tomato'\n \n # Set listening to false\n self._listening = False\n\n # Create Accordion\n self._accordion = ipw.Accordion(children=[\n ipw.HBox([ipw.VBox([self._text_terminal]),\n ipw.VBox([self._start_button, \n self._stop_button, \n self._clear_button,\n self._auto_clear_button],\n layout=ipw.Layout(align_self='flex-start'))\n ])\n ])\n self._accordion.set_title(0, description)\n \n def value(self):\n return self._text_terminal.value\n \n def append(self, value):\n if self._listening:\n if isinstance(value, str):\n if self._auto_clear and self._counter >= 10:\n self.clear()\n self._counter = 0\n self._text_terminal.value = self._text_terminal.value + value\n self._counter += 1\n else:\n raise ValueError('Terminal value must be a string.')\n \n def start(self):\n self._listening = True\n self._start_button.style.button_color = 'lightgreen'\n self._stop_button.style.button_color = 'lightgray'\n \n def stop(self):\n self._listening = False\n self._start_button.style.button_color = 'lightgray'\n self._stop_button.style.button_color = 'tomato'\n \n def clear(self):\n self._text_terminal.value = ('')\n self._counter = 0\n \n def autoclear(self):\n self._auto_clear = not self._auto_clear\n if self._auto_clear:\n self._auto_clear_button.style.button_color = 'lightblue'\n else:\n self._auto_clear_button.style.button_color = 'lightgray' \n \n def get_widget(self):\n return self._accordion\n\n \nclass Button():\n def __init__(self, description, callback, state = True, button_id = 0):\n \n # Set the button state\n self._state = state\n \n # Set button ID\n self._button_id = button_id\n \n # The callback to be ran\n self._callback = callback\n \n # Create a button object\n self._button = ipw.Button(description=description,\n layout=ipw.Layout(margin='auto',\n border='solid white'))\n self._button.on_click(lambda _: self.on_click())\n 
\n if self._state:\n self._button.style.button_color = 'lightgreen'\n else:\n self._button.style.button_color = 'tomato'\n \n def on_click(self):\n self._state = not self._state\n self._callback(value = self._state, button_id = self._button_id)\n if self._state:\n self._button.style.button_color = 'lightgreen'\n else:\n self._button.style.button_color = 'tomato'\n \n def get_widget(self):\n return self._button\n" }, { "alpha_fraction": 0.7291338443756104, "alphanum_fraction": 0.7425196766853333, "avg_line_length": 30.75, "blob_id": "fd5ce49057440865b192c1db612c573e07256009", "content_id": "5fb55fcd664354d832c88f138c8cd695b4e7e0d2", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1270, "license_type": "permissive", "max_line_length": 103, "num_lines": 40, "path": "/boards/ip/iprepo/bpsk_transmitter_v1_0/drivers/bpsk_transmitter_v1_0/src/bpsk_transmitter.c", "repo_name": "mfkiwl/BPSK-Transceiver", "src_encoding": "UTF-8", "text": "#include \"bpsk_transmitter.h\"\n#ifndef __linux__\nint bpsk_transmitter_CfgInitialize(bpsk_transmitter *InstancePtr, bpsk_transmitter_Config *ConfigPtr) {\n Xil_AssertNonvoid(InstancePtr != NULL);\n Xil_AssertNonvoid(ConfigPtr != NULL);\n\n InstancePtr->bpsk_transmitter_BaseAddress = ConfigPtr->bpsk_transmitter_BaseAddress;\n\n InstancePtr->IsReady = 1;\n return XST_SUCCESS;\n}\n#endif\nvoid bpsk_transmitter_enable_tx_write(bpsk_transmitter *InstancePtr, u32 Data) {\n\n Xil_AssertVoid(InstancePtr != NULL);\n\n bpsk_transmitter_WriteReg(InstancePtr->bpsk_transmitter_BaseAddress, 4, Data);\n}\nu32 bpsk_transmitter_enable_tx_read(bpsk_transmitter *InstancePtr) {\n\n u32 Data;\n Xil_AssertVoid(InstancePtr != NULL);\n\n Data = bpsk_transmitter_ReadReg(InstancePtr->bpsk_transmitter_BaseAddress, 4);\n return Data;\n}\nvoid bpsk_transmitter_enable_data_write(bpsk_transmitter *InstancePtr, u32 Data) {\n\n Xil_AssertVoid(InstancePtr != NULL);\n\n bpsk_transmitter_WriteReg(InstancePtr->bpsk_transmitter_BaseAddress, 0, Data);\n}\nu32 bpsk_transmitter_enable_data_read(bpsk_transmitter *InstancePtr) {\n\n u32 Data;\n Xil_AssertVoid(InstancePtr != NULL);\n\n Data = bpsk_transmitter_ReadReg(InstancePtr->bpsk_transmitter_BaseAddress, 0);\n return Data;\n}\n" }, { "alpha_fraction": 0.6822733283042908, "alphanum_fraction": 0.6958051323890686, "avg_line_length": 28.79838752746582, "blob_id": "57bc00a589c54390edd44fbe6a68511bfc6d2b0d", "content_id": "14ca7ee513079b74a13493af41217788c322cff5", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3695, "license_type": "permissive", "max_line_length": 105, "num_lines": 124, "path": "/boards/ip/iprepo/rfsoc_transmitter_v1_0/drivers/rfsoc_transmitter_v1_0/src/rfsoc_transmitter.h", "repo_name": "mfkiwl/BPSK-Transceiver", "src_encoding": "UTF-8", "text": "#ifndef RFSOC_TRANSMITTER__H\n#define RFSOC_TRANSMITTER__H\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n/***************************** Include Files *********************************/\n#ifndef __linux__\n#include \"xil_types.h\"\n#include \"xil_assert.h\"\n#include \"xstatus.h\"\n#include \"xil_io.h\"\n#else\n#include <stdint.h>\n#include <assert.h>\n#include <dirent.h>\n#include <fcntl.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys/mman.h>\n#include <unistd.h>\n#include <stddef.h>\n#endif\n#include \"rfsoc_transmitter_hw.h\"\n/**************************** Type Definitions ******************************/\n#ifdef 
__linux__\ntypedef uint8_t u8;\ntypedef uint16_t u16;\ntypedef uint32_t u32;\n#else\ntypedef struct {\n u16 DeviceId;\n u32 rfsoc_transmitter_BaseAddress;\n} rfsoc_transmitter_Config;\n#endif\n/**\n* The rfsoc_transmitter driver instance data. The user is required to\n* allocate a variable of this type for every rfsoc_transmitter device in the system.\n* A pointer to a variable of this type is then passed to the driver\n* API functions.\n*/\ntypedef struct {\n u32 rfsoc_transmitter_BaseAddress;\n u32 IsReady;\n} rfsoc_transmitter;\n/***************** Macros (Inline Functions) Definitions *********************/\n#ifndef __linux__\n#define rfsoc_transmitter_WriteReg(BaseAddress, RegOffset, Data) \\\n Xil_Out32((BaseAddress) + (RegOffset), (u32)(Data))\n#define rfsoc_transmitter_ReadReg(BaseAddress, RegOffset) \\\n Xil_In32((BaseAddress) + (RegOffset))\n#else\n#define rfsoc_transmitter_WriteReg(BaseAddress, RegOffset, Data) \\\n *(volatile u32*)((BaseAddress) + (RegOffset)) = (u32)(Data)\n#define rfsoc_transmitter_ReadReg(BaseAddress, RegOffset) \\\n *(volatile u32*)((BaseAddress) + (RegOffset))\n\n#define Xil_AssertVoid(expr) assert(expr)\n#define Xil_AssertNonvoid(expr) assert(expr)\n\n#define XST_SUCCESS 0\n#define XST_DEVICE_NOT_FOUND 2\n#define XST_OPEN_DEVICE_FAILED 3\n#define XIL_COMPONENT_IS_READY 1\n#endif\n/************************** Function Prototypes *****************************/\n#ifndef __linux__\nint rfsoc_transmitter_Initialize(rfsoc_transmitter *InstancePtr, u16 DeviceId);\nrfsoc_transmitter_Config* rfsoc_transmitter_LookupConfig(u16 DeviceId);\nint rfsoc_transmitter_CfgInitialize(rfsoc_transmitter *InstancePtr, rfsoc_transmitter_Config *ConfigPtr);\n#else\nint rfsoc_transmitter_Initialize(rfsoc_transmitter *InstancePtr, const char* InstanceName);\nint rfsoc_transmitter_Release(rfsoc_transmitter *InstancePtr);\n#endif\n/**\n* Write to enable_tx gateway of rfsoc_transmitter. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the enable_tx instance to operate on.\n* @param\tData is value to be written to gateway enable_tx.\n*\n* @return\tNone.\n*\n* @note .\n*\n*/\nvoid rfsoc_transmitter_enable_tx_write(rfsoc_transmitter *InstancePtr, u32 Data);\n/**\n* Read from enable_tx gateway of rfsoc_transmitter. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the enable_tx instance to operate on.\n*\n* @return\tu32\n*\n* @note .\n*\n*/\nu32 rfsoc_transmitter_enable_tx_read(rfsoc_transmitter *InstancePtr);\n/**\n* Write to enable_data gateway of rfsoc_transmitter. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the enable_data instance to operate on.\n* @param\tData is value to be written to gateway enable_data.\n*\n* @return\tNone.\n*\n* @note .\n*\n*/\nvoid rfsoc_transmitter_enable_data_write(rfsoc_transmitter *InstancePtr, u32 Data);\n/**\n* Read from enable_data gateway of rfsoc_transmitter. 
Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the enable_data instance to operate on.\n*\n* @return\tu32\n*\n* @note .\n*\n*/\nu32 rfsoc_transmitter_enable_data_read(rfsoc_transmitter *InstancePtr);\n#ifdef __cplusplus\n}\n#endif\n#endif\n" }, { "alpha_fraction": 0.5637820959091187, "alphanum_fraction": 0.5804917812347412, "avg_line_length": 37.33854293823242, "blob_id": "ca2fddf8971a4790660b1cfb33e5f2e6499559ea", "content_id": "6c425d1b39177d6e14cc59551a5ef2881f55256a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7361, "license_type": "permissive", "max_line_length": 145, "num_lines": 192, "path": "/rfsoc_radio/bpsk_transmitter.py", "repo_name": "mfkiwl/BPSK-Transceiver", "src_encoding": "UTF-8", "text": "__author__ = \"David Northcote\"\n__organisation__ = \"The Univeristy of Strathclyde\"\n__support__ = \"https://github.com/strath-sdr/rfsoc_radio\"\n\nfrom pynq import DefaultIP\nfrom pynq import allocate\nimport numpy as np\nfrom random import randint\nfrom .async_radio import AsyncRadioTx\nfrom .quick_widgets import TransmitTerminal\n\n\nclass BpskTransmitter():\n def __init__(self, axi_dma, bpsk_transmitter):\n \"\"\"Create a BPSK Transmitter object that controls the transmitter\n and corresponding AXI DMA for data movement between the PS and PL.\"\"\"\n super().__init__()\n\n def terminal_callback():\n data = self._terminal.value()\n if data is not '':\n self.data(data)\n self.start()\n self._terminal.clear()\n \n self.axi_dma = axi_dma\n self.controller = bpsk_transmitter\n \n self.controller.enable_data = 1\n self.controller.enable_transmitter = 1\n \n self.frame_size = 44\n self.random_size = 10\n \n self._flags = 0\n self._frame_number = 0\n self._tx_buff = self._create_buffer()\n \n self._message = self._prepare_message(np.array([72, 101, 108, 108, 111, 32, 87, 111, 114, 108, 100, 33], dtype=np.uint8))\n self.mode = 'single'\n \n # Create a new radio transmitter object\n self.monitor = AsyncRadioTx(rate=1, timer_callback=self._transfer)\n\n # Create a TransmitTerminal object for custom user ascii\n self._terminal = TransmitTerminal(description='Message to Transmit:')\n self._terminal.callback = [terminal_callback]\n \n def start(self):\n \"\"\"Start data transmission using the message buffer set\n through BpskTransmitter.data(data). The transmission ends once the\n entire message has sent or BpskTransmitter.stop() is called.\n \"\"\"\n if self.monitor.is_running:\n raise RuntimeError('Transmitter already started.')\n else:\n if self.mode == 'repeat':\n self.monitor.start()\n elif self.mode == 'single':\n self._transfer()\n else:\n raise ValueError('Transmitter mode should be repeat or single.')\n \n def data(self, data='Hello World'):\n \"\"\"Set the message buffer with ascii data. 
The ascii data is\n converted to numpy and stored in the message buffer awaiting transmission.\n \"\"\"\n if isinstance(data, str):\n msg = self._ascii_to_numpy(data)\n elif isinstance(data, bytes):\n msg = np.frombuffer(data, dtype=np.uint8)\n elif isinstance(data, np.ndarray):\n msg = message\n else:\n raise TypeError('Message must be string, numpy array, or bytes/bytearray.')\n self._message = self._prepare_message(msg)\n \n def stop(self):\n \"\"\"Stop data transmission if it is currently underway.\n \"\"\"\n self.monitor.stop()\n \n def _create_buffer(self, data=np.array([72, 101, 108, 108, 111, 32, 87, 111, 114, 108, 100, 33], dtype=np.uint8), eof=1, padding=0):\n \"\"\"Create a buffer that is loaded user data. Append the Extended Barker sequence\n to the user data and then pad with zeros\n \"\"\"\n self._flags = eof\n if data.size == 0:\n raise ValueError('Message size should be greater than 0.')\n msg = np.array(data, dtype=np.uint8)\n # Append Barker and Random Data\n bkr = np.array([0, 0, 63, 112, 28, len(msg) + 5, self._frame_number, self._flags, 5, len(msg), padding], dtype=np.uint8)\n rnd = np.array([randint(0, 255) for p in range(0, self.random_size)], dtype=np.uint8)\n seq = np.append(bkr, msg)\n seq = np.append(rnd, seq)\n pad = np.append(seq, np.zeros(int(np.ceil((len(rnd) + len(bkr) + len(msg))/32) * 32 - (len(rnd) + len(bkr) + len(msg))), dtype=np.uint8))\n buf = allocate(shape=(len(pad),), dtype=np.uint8)\n buf[:] = pad[:]\n return buf\n \n def _dma_transfer(self, pynqbuffer):\n self.axi_dma.sendchannel.transfer(pynqbuffer)\n self.axi_dma.sendchannel.wait()\n \n def _transfer(self):\n # Create new send buffer for message\n sof = 2\n eof = 0\n padding = 0\n for i in range(self._message[\"nframes\"]):\n if i > 0:\n sof = 0\n if i == self._message[\"nframes\"] - 1:\n eof = 1\n padding = self._message[\"padding\"]\n self._tx_buff.freebuffer()\n self._tx_buff = self._create_buffer(self._message[\"message\"][i], sof+eof, padding)\n\n # Send the message\n self._dma_transfer(self._tx_buff)\n self._frame_number += 1\n self._frame_number = 0\n \n def _ascii_to_numpy(self, message='Thou shall not sample higher than Nyquist... 
unless Bob says so.'):\n \"\"\"Given a custom ascii message, create a send and receive buffer, packages the message\n into a frame with extended barker sequence, and transmits the frame.\n \"\"\"\n if not message:\n raise ValueError('Message should contain data.')\n # Format the message as an integer\n return np.array(np.array(message, 'c').view(np.uint8), dtype = np.uint8)\n \n def _prepare_message(self, msg):\n \n if len(msg) > self.frame_size:\n # Get the number of packets that will be sent\n ncols = int(np.ceil(len(msg)/self.frame_size))\n\n # Pad the message with zeros as required by the number of packets\n nzeros = ncols*self.frame_size - len(msg)\n pad = np.append(msg, np.zeros(nzeros, dtype=np.uint8))\n\n # Reshape the array into packets by self.frame_size\n pckts = np.reshape(pad, (ncols, self.frame_size))\n\n # Create data frame\n data = {\n \"message\" : pckts,\n \"padding\" : nzeros,\n \"nframes\" : ncols\n }\n else:\n data = {\n \"message\" : np.reshape(msg, (1, len(msg))),\n \"padding\" : self.frame_size - len(msg),\n \"nframes\" : 1\n }\n\n return data\n\n def terminal(self):\n \"\"\"Returns a transmitter terminal object for inserting Ascii data\n for transmission.\n \"\"\"\n return self._terminal.get_widget()\n \nclass BpskTransmitterCore(DefaultIP):\n \"\"\"Driver for BPSK Transmitter's core logic IP\n Exposes all the configuration registers by name via data-driven properties\n \"\"\"\n def __init__(self, description):\n super().__init__(description=description)\n \n bindto = ['UoS:RFSoC:bpsk_transmitter:1.0']\n \n# LUT of property addresses for our data-driven properties\n_bpskTransmitter_props = [(\"enable_data\", 0),\n (\"enable_transmitter\", 4)]\n \n# Function to return a MMIO Getter and Setter based on a relative address\ndef _create_mmio_property(addr):\n def _get(self):\n return self.read(addr)\n \n def _set(self, value):\n self.write(addr, value)\n \n return property(_get, _set)\n \n# Generate getters and setters based on _bpskTransmitter_props\nfor (name, addr) in _bpskTransmitter_props:\n setattr(BpskTransmitterCore, name, _create_mmio_property(addr))\n" }, { "alpha_fraction": 0.6730430126190186, "alphanum_fraction": 0.6871381998062134, "avg_line_length": 26.027210235595703, "blob_id": "f1445edef215432b22017ffd35eaf9035427882e", "content_id": "fbe2cdefafced908e289101f7c8b66884551c59e", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3973, "license_type": "permissive", "max_line_length": 81, "num_lines": 147, "path": "/boards/ip/iprepo/inspector_v1_0/drivers/inspector_v1_0/src/inspector.h", "repo_name": "mfkiwl/BPSK-Transceiver", "src_encoding": "UTF-8", "text": "#ifndef INSPECTOR__H\n#define INSPECTOR__H\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n/***************************** Include Files *********************************/\n#ifndef __linux__\n#include \"xil_types.h\"\n#include \"xil_assert.h\"\n#include \"xstatus.h\"\n#include \"xil_io.h\"\n#else\n#include <stdint.h>\n#include <assert.h>\n#include <dirent.h>\n#include <fcntl.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys/mman.h>\n#include <unistd.h>\n#include <stddef.h>\n#endif\n#include \"inspector_hw.h\"\n/**************************** Type Definitions ******************************/\n#ifdef __linux__\ntypedef uint8_t u8;\ntypedef uint16_t u16;\ntypedef uint32_t u32;\n#else\ntypedef struct {\n u16 DeviceId;\n u32 inspector_BaseAddress;\n} inspector_Config;\n#endif\n/**\n* The inspector driver instance data. 
The user is required to\n* allocate a variable of this type for every inspector device in the system.\n* A pointer to a variable of this type is then passed to the driver\n* API functions.\n*/\ntypedef struct {\n u32 inspector_BaseAddress;\n u32 IsReady;\n} inspector;\n/***************** Macros (Inline Functions) Definitions *********************/\n#ifndef __linux__\n#define inspector_WriteReg(BaseAddress, RegOffset, Data) \\\n Xil_Out32((BaseAddress) + (RegOffset), (u32)(Data))\n#define inspector_ReadReg(BaseAddress, RegOffset) \\\n Xil_In32((BaseAddress) + (RegOffset))\n#else\n#define inspector_WriteReg(BaseAddress, RegOffset, Data) \\\n *(volatile u32*)((BaseAddress) + (RegOffset)) = (u32)(Data)\n#define inspector_ReadReg(BaseAddress, RegOffset) \\\n *(volatile u32*)((BaseAddress) + (RegOffset))\n\n#define Xil_AssertVoid(expr) assert(expr)\n#define Xil_AssertNonvoid(expr) assert(expr)\n\n#define XST_SUCCESS 0\n#define XST_DEVICE_NOT_FOUND 2\n#define XST_OPEN_DEVICE_FAILED 3\n#define XIL_COMPONENT_IS_READY 1\n#endif\n/************************** Function Prototypes *****************************/\n#ifndef __linux__\nint inspector_Initialize(inspector *InstancePtr, u16 DeviceId);\ninspector_Config* inspector_LookupConfig(u16 DeviceId);\nint inspector_CfgInitialize(inspector *InstancePtr, inspector_Config *ConfigPtr);\n#else\nint inspector_Initialize(inspector *InstancePtr, const char* InstanceName);\nint inspector_Release(inspector *InstancePtr);\n#endif\n/**\n* Write to reset_dim gateway of inspector. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the reset_dim instance to operate on.\n* @param\tData is value to be written to gateway reset_dim.\n*\n* @return\tNone.\n*\n* @note .\n*\n*/\nvoid inspector_reset_dim_write(inspector *InstancePtr, u32 Data);\n/**\n* Read from reset_dim gateway of inspector. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the reset_dim instance to operate on.\n*\n* @return\tu32\n*\n* @note .\n*\n*/\nu32 inspector_reset_dim_read(inspector *InstancePtr);\n/**\n* Write to packetsize_dim gateway of inspector. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the packetsize_dim instance to operate on.\n* @param\tData is value to be written to gateway packetsize_dim.\n*\n* @return\tNone.\n*\n* @note .\n*\n*/\nvoid inspector_packetsize_dim_write(inspector *InstancePtr, u32 Data);\n/**\n* Read from packetsize_dim gateway of inspector. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the packetsize_dim instance to operate on.\n*\n* @return\tu32\n*\n* @note .\n*\n*/\nu32 inspector_packetsize_dim_read(inspector *InstancePtr);\n/**\n* Write to enable_dim gateway of inspector. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the enable_dim instance to operate on.\n* @param\tData is value to be written to gateway enable_dim.\n*\n* @return\tNone.\n*\n* @note .\n*\n*/\nvoid inspector_enable_dim_write(inspector *InstancePtr, u32 Data);\n/**\n* Read from enable_dim gateway of inspector. 
Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the enable_dim instance to operate on.\n*\n* @return\tu32\n*\n* @note .\n*\n*/\nu32 inspector_enable_dim_read(inspector *InstancePtr);\n#ifdef __cplusplus\n}\n#endif\n#endif\n" }, { "alpha_fraction": 0.5061759352684021, "alphanum_fraction": 0.5215221643447876, "avg_line_length": 37.533653259277344, "blob_id": "4446aa83eae5d0c1b54e453fc886c354e3c27774", "content_id": "02d08d7e24169f247178b55a647e55f4b5a54ad0", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8015, "license_type": "permissive", "max_line_length": 107, "num_lines": 208, "path": "/rfsoc_radio/bpsk_receiver.py", "repo_name": "mfkiwl/BPSK-Transceiver", "src_encoding": "UTF-8", "text": "__author__ = \"David Northcote\"\n__organisation__ = \"The Univeristy of Strathclyde\"\n__support__ = \"https://github.com/strath-sdr/rfsoc_radio\"\n\nfrom pynq import DefaultIP\nfrom pynq import allocate\nimport ipywidgets as ipw\nimport numpy as np\nfrom .quick_widgets import DropdownMenu, ReceiveTerminal\nfrom .async_radio import AsyncRadioRx\n\n\nclass BpskReceiver():\n def __init__(self, axi_dma, bpsk_receiver, inspector):\n \"\"\"Create a BPSK Receiver object that controls the receiver\n and corresonding AXI DMA for data movement between PS and PL.\"\"\"\n super().__init__()\n \n def on_signal_change(change):\n shape = ((64, ), (64, ), (512, ), (2048, ), (2048, ))\n freq = (100000, 100000, 3200000, 12800000, 12800000)\n self.inspector.set_frequency(freq[change['new']])\n self.controller.observation_point = change['new']\n self.inspector.set_shape(shape[change['new']])\n\n def terminal_callback():\n frame = self.frame\n payload = np.where(frame[\"payload\"] > 127, 0, frame[\"payload\"]).tostring().decode('ascii')\n if self._debug:\n data = 'Header: ' + str({i:frame[i] for i in frame if i!=\"payload\"}) \\\n + '\\rPayload: ' + payload + '\\r\\r'\n else:\n data = payload\n self._terminal.append(data)\n \n \"\"\"Initialise objects\"\"\"\n # Create AXI DMA object\n self.axi_dma = axi_dma\n \n # Create BPSK Receive Controller\n self.controller = bpsk_receiver\n \n # Create inspector module\n self.inspector = inspector\n \n # Create asynchronous radio receiver\n self.monitor = AsyncRadioRx(irq = self.controller.irq, \n irq_callback = self._transfer)\n \n \"\"\"AXI DMA Buffer initialisation\"\"\"\n # Allocate default memory for the _rx_buffer receiver buffer\n self._rx_buff = allocate(shape=(32,), dtype = np.uint8)\n \n \"\"\"Inspector initialisation\"\"\"\n # Create a new signal selector widget\n self._s_sel = DropdownMenu([('Phase Synchronisation', 0),\n ('Time Synchronisation', 1),\n ('Raised Cosine Filter', 2),\n ('Coarse Synchronisation', 3),\n ('CIC Decimator', 4)],\n 'Observation Point:',\n 0)\n \n # Observe the dropdown menu for changes\n self._s_sel._dropdown_menu.observe(on_signal_change, names='value')\n\n # Create a receiver terminal object and set callback\n self._terminal = ReceiveTerminal(description='Received Messages:')\n self.monitor.callback = [terminal_callback]\n self._terminal.start()\n\n # Create a debug button for our text terminal\n self._debug_button = ipw.Button(description='Debug',\n layout=ipw.Layout(margin='auto',\n border='solid white'))\n self._debug_button.on_click(lambda _: self._toggle_debug())\n self._debug_button.style.button_color = 'lightgray'\n \n \"\"\"Monitor initialisation\"\"\"\n self.monitor.start()\n\n \"\"\"Receiver initialisation\"\"\"\n # Create empty receiver frame\n 
self.frame = {\n \"number\" : 0,\n \"flags\" : 0,\n \"length\" : {\n \"total\" : 0,\n \"header\" : 0,\n \"payload\" : 0,\n \"padding\" : 0\n },\n \"payload\" : 0\n }\n\n # Set terminal debug mode\n self._debug = False\n\n def _toggle_debug(self):\n if self._debug:\n self._debug = False\n self._debug_button.style.button_color = 'lightgray'\n else:\n self._debug = True\n self._debug_button.style.button_color = 'lightblue'\n \n def _dma_transfer(self, pynqbuffer):\n self.axi_dma.recvchannel.transfer(pynqbuffer)\n self.controller.transfer = 1\n self.axi_dma.recvchannel.wait()\n self.controller.transfer = 0\n \n def _transfer(self):\n buff_len = self.controller.receive_size\n \n if buff_len > 0:\n # Create new receive buffer for message\n self._rx_buff.freebuffer()\n self._rx_buff = allocate(shape=(buff_len,), dtype = np.uint8)\n\n # Prepare to receive the message\n self._dma_transfer(self._rx_buff)\n \n # Obtain the message\n self._message = np.array(self._rx_buff.astype(np.uint32), \\\n dtype = np.uint8)[5:len(self._rx_buff)]\n \n # Set frame to allow the user to read the frame data\n self.frame = {\n \"number\" : self._rx_buff[0],\n \"flags\" : self._rx_buff[1],\n \"length\" : {\n \"total\" : buff_len,\n \"header\" : self._rx_buff[2],\n \"payload\" : self._rx_buff[3],\n \"padding\" : self._rx_buff[4]\n },\n \"payload\" : self._message\n }\n \n def visualise(self):\n \"\"\"Returns widgets for inspecting and controlling signal paths in our radio.\n \"\"\"\n name = ['Time', 'Spectrum', 'Constellation']\n children = [self.inspector.time_plot(),\n self.inspector.spectrum_plot(),\n self.inspector.constellation_plot()]\n tab = ipw.Tab(children=children,\n layout=ipw.Layout(height='initial',\n width='initial'))\n for i in range(0, len(children)):\n tab.set_title(i, name[i])\n control_buttons = self.inspector.plot_control()\n rx_accordion = ipw.Accordion(children=[\n ipw.VBox([tab, \n ipw.HBox([self._s_sel.get_widget(), control_buttons[0], control_buttons[1]])])])\n rx_accordion.set_title(0, 'Receiver Visualisation')\n return rx_accordion\n\n def terminal(self):\n \"\"\"Returns a receiver terminal object for printing Ascii data\n for the receiver.\n \"\"\"\n terminal = self._terminal.get_widget()\n terminal.children[0].children[1].children = tuple(list(terminal.children[0].children[1].children) +\n [self._debug_button])\n return terminal\n \nclass BpskReceiverCore(DefaultIP):\n \"\"\"Driver for BPSK Receiver's core logic IP\n Exposes all the configuration registers by name via data-driven properties\n \"\"\"\n def __init__(self, description):\n super().__init__(description=description)\n \n bindto = ['strath.ac.uk:RFSoC:bpsk_receiver:1.0']\n \n# LUT of property addresses for our data-driven properties\n_bpskReceiver_props = [(\"reset_time_sync\", 0),\n (\"reset_phase_sync\", 4),\n (\"reset_frame_sync\", 8),\n (\"threshold\", 12),\n (\"transfer\", 28),\n (\"observation_point\", 36),\n (\"fifo_count\", 32),\n (\"receive_size\", 20),\n (\"packet_count\", 24),\n (\"coarse_passthrough\", 44),\n (\"freq_offset\", 40)]\n \n# Function to return a MMIO Getter and Setter based on a relative address\ndef _create_mmio_property(addr):\n def _get(self):\n value = self.read(addr)\n if addr == 40:\n data = -((value-(2**32)*int(str((value)>>32-1)))*2**-10)\n else:\n data = value\n return data\n \n def _set(self, value):\n self.write(addr, value)\n \n return property(_get, _set)\n \n# Generate getters and setters based on _bpskReceiver_props\nfor (name, addr) in _bpskReceiver_props:\n setattr(BpskReceiverCore, name, 
_create_mmio_property(addr))\n" }, { "alpha_fraction": 0.7536466717720032, "alphanum_fraction": 0.7692058086395264, "avg_line_length": 45.044776916503906, "blob_id": "3e0b6ecbb97fe7241344d175c06416e6c787da91", "content_id": "4a71ebbe0a823f409f7725e843b24161393ff925", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3085, "license_type": "permissive", "max_line_length": 495, "num_lines": 67, "path": "/README.md", "repo_name": "mfkiwl/BPSK-Transceiver", "src_encoding": "UTF-8", "text": "<img src=\"strathsdr_banner.png\" width=\"100%\">\n\n# RFSoC BPSK Transceiver\nThis repository is only compatible with [PYNQ images v2.6](https://github.com/Xilinx/PYNQ/releases) for the [ZCU111](https://www.xilinx.com/products/boards-and-kits/zcu111.html) and [RFSoC2x2](http://rfsoc-pynq.io/).\n\n## Introduction\nThis repository contains a BPSK transceiver radio design for RFSoC platforms. The radio is capable of transmitting and receiving BPSK modulated waveforms in loopback, or between RFSoC development boards running the same design. A simple \"hello world\" example is presented demonstrating that transmitted BPSK waveforms can be received, synchronised, and the payload extracted for analysis. Check out the demonstration below and the quick start guide to install the project on your RFSoC platform.\n\n<p align=\"center\">\n <img src=\"demonstration.gif\"/>\n<p/>\n\n## Quick Start\nFollow the instructions below to install the BPSK radio demonstrator on your development board. **You will need to give your board access to the internet**.\n* Power on your RFSoC2x2 or ZCU111 development board with an SD Card containing a fresh PYNQ v2.6 image.\n* Navigate to Jupyter Labs by opening a browser (preferably Chrome) and connecting to `http://<board_ip_address>:9090/lab`.\n* We need to open a terminal in Jupyter Lab. Firstly, open a launcher window as shown in the figure below:\n\n<p align=\"center\">\n <img src=\"../master/open_jupyter_launcher.jpg\" width=\"50%\" height=\"50%\" />\n<p/>\n\n* Now open a terminal in Jupyter as illustrated below:\n\n<p align=\"center\">\n <img src=\"../master/open_terminal_window.jpg\" width=\"50%\" height=\"50%\" />\n<p/>\n\n* Now simply install the BPSK radio demonstrator through PIP by executing the following command in the terminal:\n\n```sh\npip3 install git+https://github.com/strath-sdr/rfsoc_radio\n```\n\nOnce installation has complete, you will find the BPSK radio demonstrator notebooks located in the jupyter home workspace in the `bpsk-demonstrator` folder.\n\n## Using the Project Files\nThe following software is required to use the project files in this repository.\n- Vivado Design Suite 2020.1\n- System Generator for DSP\n- MATLAB R2020a\n\n### System Generator\nThe Tx and Rx IPs are in separate directories in `rfsoc_radio/boards/ip/sysgen/` that can be opened using the appropriate System Generator dialogue. Due to the large amount of decimation and interpolation in both IPs, simulating the output can take an extraordinarily long time. A less extreme multirate system would simulate much faster! \n\n### Vivado\nThis project can be built with Vivado from the command line. 
Open Vivado 2020.1 and execute the following into the tcl console:\n\n```sh\ncd /<repository-location>/boards/<board-name>/rfsoc_radio/\n```\n\nNow that we have moved into the correct directory, make the Vivado project by running the make commands below sequentially.\n\n```sh\nmake project\nmake block_design\nmake bitstream_file\n```\n\nAlternatively, you can run the entire project build by executing the following into the tcl console:\n```sh\nmake all\n```\n\n## License \n[BSD 3-Clause](../../blob/master/LICENSE)\n" }, { "alpha_fraction": 0.6791473031044006, "alphanum_fraction": 0.6928122639656067, "avg_line_length": 28.50806427001953, "blob_id": "6123d59772c795b7ab605d6c8b04714a573b7bcd", "content_id": "247ccc1e1adfc48089ba7de27a242d09d39eb0b3", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3659, "license_type": "permissive", "max_line_length": 102, "num_lines": 124, "path": "/boards/ip/iprepo/bpsk_transmitter_v1_0/drivers/bpsk_transmitter_v1_0/src/bpsk_transmitter.h", "repo_name": "mfkiwl/BPSK-Transceiver", "src_encoding": "UTF-8", "text": "#ifndef BPSK_TRANSMITTER__H\n#define BPSK_TRANSMITTER__H\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n/***************************** Include Files *********************************/\n#ifndef __linux__\n#include \"xil_types.h\"\n#include \"xil_assert.h\"\n#include \"xstatus.h\"\n#include \"xil_io.h\"\n#else\n#include <stdint.h>\n#include <assert.h>\n#include <dirent.h>\n#include <fcntl.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys/mman.h>\n#include <unistd.h>\n#include <stddef.h>\n#endif\n#include \"bpsk_transmitter_hw.h\"\n/**************************** Type Definitions ******************************/\n#ifdef __linux__\ntypedef uint8_t u8;\ntypedef uint16_t u16;\ntypedef uint32_t u32;\n#else\ntypedef struct {\n u16 DeviceId;\n u32 bpsk_transmitter_BaseAddress;\n} bpsk_transmitter_Config;\n#endif\n/**\n* The bpsk_transmitter driver instance data. 
The user is required to\n* allocate a variable of this type for every bpsk_transmitter device in the system.\n* A pointer to a variable of this type is then passed to the driver\n* API functions.\n*/\ntypedef struct {\n u32 bpsk_transmitter_BaseAddress;\n u32 IsReady;\n} bpsk_transmitter;\n/***************** Macros (Inline Functions) Definitions *********************/\n#ifndef __linux__\n#define bpsk_transmitter_WriteReg(BaseAddress, RegOffset, Data) \\\n Xil_Out32((BaseAddress) + (RegOffset), (u32)(Data))\n#define bpsk_transmitter_ReadReg(BaseAddress, RegOffset) \\\n Xil_In32((BaseAddress) + (RegOffset))\n#else\n#define bpsk_transmitter_WriteReg(BaseAddress, RegOffset, Data) \\\n *(volatile u32*)((BaseAddress) + (RegOffset)) = (u32)(Data)\n#define bpsk_transmitter_ReadReg(BaseAddress, RegOffset) \\\n *(volatile u32*)((BaseAddress) + (RegOffset))\n\n#define Xil_AssertVoid(expr) assert(expr)\n#define Xil_AssertNonvoid(expr) assert(expr)\n\n#define XST_SUCCESS 0\n#define XST_DEVICE_NOT_FOUND 2\n#define XST_OPEN_DEVICE_FAILED 3\n#define XIL_COMPONENT_IS_READY 1\n#endif\n/************************** Function Prototypes *****************************/\n#ifndef __linux__\nint bpsk_transmitter_Initialize(bpsk_transmitter *InstancePtr, u16 DeviceId);\nbpsk_transmitter_Config* bpsk_transmitter_LookupConfig(u16 DeviceId);\nint bpsk_transmitter_CfgInitialize(bpsk_transmitter *InstancePtr, bpsk_transmitter_Config *ConfigPtr);\n#else\nint bpsk_transmitter_Initialize(bpsk_transmitter *InstancePtr, const char* InstanceName);\nint bpsk_transmitter_Release(bpsk_transmitter *InstancePtr);\n#endif\n/**\n* Write to enable_tx gateway of bpsk_transmitter. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the enable_tx instance to operate on.\n* @param\tData is value to be written to gateway enable_tx.\n*\n* @return\tNone.\n*\n* @note .\n*\n*/\nvoid bpsk_transmitter_enable_tx_write(bpsk_transmitter *InstancePtr, u32 Data);\n/**\n* Read from enable_tx gateway of bpsk_transmitter. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the enable_tx instance to operate on.\n*\n* @return\tu32\n*\n* @note .\n*\n*/\nu32 bpsk_transmitter_enable_tx_read(bpsk_transmitter *InstancePtr);\n/**\n* Write to enable_data gateway of bpsk_transmitter. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the enable_data instance to operate on.\n* @param\tData is value to be written to gateway enable_data.\n*\n* @return\tNone.\n*\n* @note .\n*\n*/\nvoid bpsk_transmitter_enable_data_write(bpsk_transmitter *InstancePtr, u32 Data);\n/**\n* Read from enable_data gateway of bpsk_transmitter. 
Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the enable_data instance to operate on.\n*\n* @return\tu32\n*\n* @note .\n*\n*/\nu32 bpsk_transmitter_enable_data_read(bpsk_transmitter *InstancePtr);\n#ifdef __cplusplus\n}\n#endif\n#endif\n" }, { "alpha_fraction": 0.7208737730979919, "alphanum_fraction": 0.7354369163513184, "avg_line_length": 36.45454406738281, "blob_id": "0c62525ebc7d20af6f1d8c103cf24f9a84dd91d4", "content_id": "ee428e19e5e3d32b395436aeb9c0ead2f15d3bfb", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 412, "license_type": "permissive", "max_line_length": 78, "num_lines": 11, "path": "/boards/ip/iprepo/inspector_v1_0/drivers/inspector_v1_0/src/inspector_hw.h", "repo_name": "mfkiwl/BPSK-Transceiver", "src_encoding": "UTF-8", "text": "/**\n*\n* @file inspector_hw.h\n*\n* This header file contains identifiers and driver functions (or\n* macros) that can be used to access the device. The user should refer to the\n* hardware device specification for more details of the device operation.\n*/ \n#define INSPECTOR_RESET_DIM 0x0/**< reset_dim */\n#define INSPECTOR_PACKETSIZE_DIM 0x8/**< packetsize_dim */\n#define INSPECTOR_ENABLE_DIM 0x4/**< enable_dim */\n" }, { "alpha_fraction": 0.6973150968551636, "alphanum_fraction": 0.7089665532112122, "avg_line_length": 25.85714340209961, "blob_id": "063001c122c90a4d5f4624e76df4962d00a9f911", "content_id": "a7d4d0f13d382f2119d802e4c9dda1b128ee08f4", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 7896, "license_type": "permissive", "max_line_length": 93, "num_lines": 294, "path": "/boards/ip/iprepo/bpsk_receiver_v1_0/drivers/bpsk_receiver_v1_0/src/bpsk_receiver.h", "repo_name": "mfkiwl/BPSK-Transceiver", "src_encoding": "UTF-8", "text": "#ifndef BPSK_RECEIVER__H\n#define BPSK_RECEIVER__H\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n/***************************** Include Files *********************************/\n#ifndef __linux__\n#include \"xil_types.h\"\n#include \"xil_assert.h\"\n#include \"xstatus.h\"\n#include \"xil_io.h\"\n#else\n#include <stdint.h>\n#include <assert.h>\n#include <dirent.h>\n#include <fcntl.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys/mman.h>\n#include <unistd.h>\n#include <stddef.h>\n#endif\n#include \"bpsk_receiver_hw.h\"\n/**************************** Type Definitions ******************************/\n#ifdef __linux__\ntypedef uint8_t u8;\ntypedef uint16_t u16;\ntypedef uint32_t u32;\n#else\ntypedef struct {\n u16 DeviceId;\n u32 bpsk_receiver_BaseAddress;\n} bpsk_receiver_Config;\n#endif\n/**\n* The bpsk_receiver driver instance data. 
The user is required to\n* allocate a variable of this type for every bpsk_receiver device in the system.\n* A pointer to a variable of this type is then passed to the driver\n* API functions.\n*/\ntypedef struct {\n u32 bpsk_receiver_BaseAddress;\n u32 IsReady;\n} bpsk_receiver;\n/***************** Macros (Inline Functions) Definitions *********************/\n#ifndef __linux__\n#define bpsk_receiver_WriteReg(BaseAddress, RegOffset, Data) \\\n Xil_Out32((BaseAddress) + (RegOffset), (u32)(Data))\n#define bpsk_receiver_ReadReg(BaseAddress, RegOffset) \\\n Xil_In32((BaseAddress) + (RegOffset))\n#else\n#define bpsk_receiver_WriteReg(BaseAddress, RegOffset, Data) \\\n *(volatile u32*)((BaseAddress) + (RegOffset)) = (u32)(Data)\n#define bpsk_receiver_ReadReg(BaseAddress, RegOffset) \\\n *(volatile u32*)((BaseAddress) + (RegOffset))\n\n#define Xil_AssertVoid(expr) assert(expr)\n#define Xil_AssertNonvoid(expr) assert(expr)\n\n#define XST_SUCCESS 0\n#define XST_DEVICE_NOT_FOUND 2\n#define XST_OPEN_DEVICE_FAILED 3\n#define XIL_COMPONENT_IS_READY 1\n#endif\n/************************** Function Prototypes *****************************/\n#ifndef __linux__\nint bpsk_receiver_Initialize(bpsk_receiver *InstancePtr, u16 DeviceId);\nbpsk_receiver_Config* bpsk_receiver_LookupConfig(u16 DeviceId);\nint bpsk_receiver_CfgInitialize(bpsk_receiver *InstancePtr, bpsk_receiver_Config *ConfigPtr);\n#else\nint bpsk_receiver_Initialize(bpsk_receiver *InstancePtr, const char* InstanceName);\nint bpsk_receiver_Release(bpsk_receiver *InstancePtr);\n#endif\n/**\n* Write to threshold gateway of bpsk_receiver. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the threshold instance to operate on.\n* @param\tData is value to be written to gateway threshold.\n*\n* @return\tNone.\n*\n* @note .\n*\n*/\nvoid bpsk_receiver_threshold_write(bpsk_receiver *InstancePtr, u32 Data);\n/**\n* Read from threshold gateway of bpsk_receiver. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the threshold instance to operate on.\n*\n* @return\tu32\n*\n* @note .\n*\n*/\nu32 bpsk_receiver_threshold_read(bpsk_receiver *InstancePtr);\n/**\n* Write to reset_time_sync gateway of bpsk_receiver. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the reset_time_sync instance to operate on.\n* @param\tData is value to be written to gateway reset_time_sync.\n*\n* @return\tNone.\n*\n* @note .\n*\n*/\nvoid bpsk_receiver_reset_time_sync_write(bpsk_receiver *InstancePtr, u32 Data);\n/**\n* Read from reset_time_sync gateway of bpsk_receiver. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the reset_time_sync instance to operate on.\n*\n* @return\tu32\n*\n* @note .\n*\n*/\nu32 bpsk_receiver_reset_time_sync_read(bpsk_receiver *InstancePtr);\n/**\n* Write to reset_phase_sync gateway of bpsk_receiver. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the reset_phase_sync instance to operate on.\n* @param\tData is value to be written to gateway reset_phase_sync.\n*\n* @return\tNone.\n*\n* @note .\n*\n*/\nvoid bpsk_receiver_reset_phase_sync_write(bpsk_receiver *InstancePtr, u32 Data);\n/**\n* Read from reset_phase_sync gateway of bpsk_receiver. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the reset_phase_sync instance to operate on.\n*\n* @return\tu32\n*\n* @note .\n*\n*/\nu32 bpsk_receiver_reset_phase_sync_read(bpsk_receiver *InstancePtr);\n/**\n* Write to reset_frame_sync gateway of bpsk_receiver. 
Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the reset_frame_sync instance to operate on.\n* @param\tData is value to be written to gateway reset_frame_sync.\n*\n* @return\tNone.\n*\n* @note .\n*\n*/\nvoid bpsk_receiver_reset_frame_sync_write(bpsk_receiver *InstancePtr, u32 Data);\n/**\n* Read from reset_frame_sync gateway of bpsk_receiver. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the reset_frame_sync instance to operate on.\n*\n* @return\tu32\n*\n* @note .\n*\n*/\nu32 bpsk_receiver_reset_frame_sync_read(bpsk_receiver *InstancePtr);\n/**\n* Write to observation_point gateway of bpsk_receiver. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the observation_point instance to operate on.\n* @param\tData is value to be written to gateway observation_point.\n*\n* @return\tNone.\n*\n* @note .\n*\n*/\nvoid bpsk_receiver_observation_point_write(bpsk_receiver *InstancePtr, u32 Data);\n/**\n* Read from observation_point gateway of bpsk_receiver. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the observation_point instance to operate on.\n*\n* @return\tu32\n*\n* @note .\n*\n*/\nu32 bpsk_receiver_observation_point_read(bpsk_receiver *InstancePtr);\n/**\n* Write to enable_transfer gateway of bpsk_receiver. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the enable_transfer instance to operate on.\n* @param\tData is value to be written to gateway enable_transfer.\n*\n* @return\tNone.\n*\n* @note .\n*\n*/\nvoid bpsk_receiver_enable_transfer_write(bpsk_receiver *InstancePtr, u32 Data);\n/**\n* Read from enable_transfer gateway of bpsk_receiver. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the enable_transfer instance to operate on.\n*\n* @return\tu32\n*\n* @note .\n*\n*/\nu32 bpsk_receiver_enable_transfer_read(bpsk_receiver *InstancePtr);\n/**\n* Write to coarse_passthrough gateway of bpsk_receiver. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the coarse_passthrough instance to operate on.\n* @param\tData is value to be written to gateway coarse_passthrough.\n*\n* @return\tNone.\n*\n* @note .\n*\n*/\nvoid bpsk_receiver_coarse_passthrough_write(bpsk_receiver *InstancePtr, u32 Data);\n/**\n* Read from coarse_passthrough gateway of bpsk_receiver. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the coarse_passthrough instance to operate on.\n*\n* @return\tu32\n*\n* @note .\n*\n*/\nu32 bpsk_receiver_coarse_passthrough_read(bpsk_receiver *InstancePtr);\n/**\n* Read from data_count gateway of bpsk_receiver. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the data_count instance to operate on.\n*\n* @return\tu32\n*\n* @note .\n*\n*/\nu32 bpsk_receiver_data_count_read(bpsk_receiver *InstancePtr);\n/**\n* Read from frame_size gateway of bpsk_receiver. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the frame_size instance to operate on.\n*\n* @return\tu8\n*\n* @note .\n*\n*/\nu8 bpsk_receiver_frame_size_read(bpsk_receiver *InstancePtr);\n/**\n* Read from freq_offset gateway of bpsk_receiver. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the freq_offset instance to operate on.\n*\n* @return\tint\n*\n* @note .\n*\n*/\nint bpsk_receiver_freq_offset_read(bpsk_receiver *InstancePtr);\n/**\n* Read from packet_count gateway of bpsk_receiver. 
Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the packet_count instance to operate on.\n*\n* @return\tu32\n*\n* @note .\n*\n*/\nu32 bpsk_receiver_packet_count_read(bpsk_receiver *InstancePtr);\n/**\n* Read from packet_size gateway of bpsk_receiver. Assignments are LSB-justified.\n*\n* @param\tInstancePtr is the packet_size instance to operate on.\n*\n* @return\tu8\n*\n* @note .\n*\n*/\nu8 bpsk_receiver_packet_size_read(bpsk_receiver *InstancePtr);\n#ifdef __cplusplus\n}\n#endif\n#endif\n" }, { "alpha_fraction": 0.6996763944625854, "alphanum_fraction": 0.7035598754882812, "avg_line_length": 28.711538314819336, "blob_id": "62b24b90d96fdcd53d3de07d22fe8b20570356d1", "content_id": "09a0578c5febb8f5a35d64c74b51d6171d642e65", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1545, "license_type": "permissive", "max_line_length": 80, "num_lines": 52, "path": "/boards/ip/iprepo/rfsoc_transmitter_v1_0/drivers/rfsoc_transmitter_v1_0/src/rfsoc_transmitter_sinit.c", "repo_name": "mfkiwl/BPSK-Transceiver", "src_encoding": "UTF-8", "text": "/**\n* @file rfsoc_transmitter_sinit.c\n*\n* The implementation of the rfsoc_transmitter driver's static initialzation\n* functionality.\n*\n* @note\n*\n* None\n*\n*/\n#ifndef __linux__\n#include \"xstatus.h\"\n#include \"xparameters.h\"\n#include \"rfsoc_transmitter.h\"\nextern rfsoc_transmitter_Config rfsoc_transmitter_ConfigTable[];\n/**\n* Lookup the device configuration based on the unique device ID. The table\n* ConfigTable contains the configuration info for each device in the system.\n*\n* @param DeviceId is the device identifier to lookup.\n*\n* @return\n* - A pointer of data type rfsoc_transmitter_Config which\n* points to the device configuration if DeviceID is found.\n* - NULL if DeviceID is not found.\n*\n* @note None.\n*\n*/\nrfsoc_transmitter_Config *rfsoc_transmitter_LookupConfig(u16 DeviceId) {\n rfsoc_transmitter_Config *ConfigPtr = NULL;\n int Index;\n for (Index = 0; Index < XPAR_RFSOC_TRANSMITTER_NUM_INSTANCES; Index++) {\n if (rfsoc_transmitter_ConfigTable[Index].DeviceId == DeviceId) {\n ConfigPtr = &rfsoc_transmitter_ConfigTable[Index];\n break;\n }\n }\n return ConfigPtr;\n}\nint rfsoc_transmitter_Initialize(rfsoc_transmitter *InstancePtr, u16 DeviceId) {\n rfsoc_transmitter_Config *ConfigPtr;\n Xil_AssertNonvoid(InstancePtr != NULL);\n ConfigPtr = rfsoc_transmitter_LookupConfig(DeviceId);\n if (ConfigPtr == NULL) {\n InstancePtr->IsReady = 0;\n return (XST_DEVICE_NOT_FOUND);\n }\n return rfsoc_transmitter_CfgInitialize(InstancePtr, ConfigPtr);\n}\n#endif\n" }, { "alpha_fraction": 0.707125186920166, "alphanum_fraction": 0.7266801595687866, "avg_line_length": 28.65999984741211, "blob_id": "7fb0ae4cbf8e9ffede9d9f56354cfd0b6ae13161", "content_id": "4d11d3c4e778706a1ac9131f8cdd015aece0135d", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4449, "license_type": "permissive", "max_line_length": 94, "num_lines": 150, "path": "/boards/ip/iprepo/bpsk_receiver_v1_0/drivers/bpsk_receiver_v1_0/src/bpsk_receiver.c", "repo_name": "mfkiwl/BPSK-Transceiver", "src_encoding": "UTF-8", "text": "#include \"bpsk_receiver.h\"\n#ifndef __linux__\nint bpsk_receiver_CfgInitialize(bpsk_receiver *InstancePtr, bpsk_receiver_Config *ConfigPtr) {\n Xil_AssertNonvoid(InstancePtr != NULL);\n Xil_AssertNonvoid(ConfigPtr != NULL);\n\n InstancePtr->bpsk_receiver_BaseAddress = ConfigPtr->bpsk_receiver_BaseAddress;\n\n 
InstancePtr->IsReady = 1;\n return XST_SUCCESS;\n}\n#endif\nvoid bpsk_receiver_threshold_write(bpsk_receiver *InstancePtr, u32 Data) {\n\n Xil_AssertVoid(InstancePtr != NULL);\n\n bpsk_receiver_WriteReg(InstancePtr->bpsk_receiver_BaseAddress, 12, Data);\n}\nu32 bpsk_receiver_threshold_read(bpsk_receiver *InstancePtr) {\n\n u32 Data;\n Xil_AssertVoid(InstancePtr != NULL);\n\n Data = bpsk_receiver_ReadReg(InstancePtr->bpsk_receiver_BaseAddress, 12);\n return Data;\n}\nvoid bpsk_receiver_reset_time_sync_write(bpsk_receiver *InstancePtr, u32 Data) {\n\n Xil_AssertVoid(InstancePtr != NULL);\n\n bpsk_receiver_WriteReg(InstancePtr->bpsk_receiver_BaseAddress, 0, Data);\n}\nu32 bpsk_receiver_reset_time_sync_read(bpsk_receiver *InstancePtr) {\n\n u32 Data;\n Xil_AssertVoid(InstancePtr != NULL);\n\n Data = bpsk_receiver_ReadReg(InstancePtr->bpsk_receiver_BaseAddress, 0);\n return Data;\n}\nvoid bpsk_receiver_reset_phase_sync_write(bpsk_receiver *InstancePtr, u32 Data) {\n\n Xil_AssertVoid(InstancePtr != NULL);\n\n bpsk_receiver_WriteReg(InstancePtr->bpsk_receiver_BaseAddress, 4, Data);\n}\nu32 bpsk_receiver_reset_phase_sync_read(bpsk_receiver *InstancePtr) {\n\n u32 Data;\n Xil_AssertVoid(InstancePtr != NULL);\n\n Data = bpsk_receiver_ReadReg(InstancePtr->bpsk_receiver_BaseAddress, 4);\n return Data;\n}\nvoid bpsk_receiver_reset_frame_sync_write(bpsk_receiver *InstancePtr, u32 Data) {\n\n Xil_AssertVoid(InstancePtr != NULL);\n\n bpsk_receiver_WriteReg(InstancePtr->bpsk_receiver_BaseAddress, 8, Data);\n}\nu32 bpsk_receiver_reset_frame_sync_read(bpsk_receiver *InstancePtr) {\n\n u32 Data;\n Xil_AssertVoid(InstancePtr != NULL);\n\n Data = bpsk_receiver_ReadReg(InstancePtr->bpsk_receiver_BaseAddress, 8);\n return Data;\n}\nvoid bpsk_receiver_observation_point_write(bpsk_receiver *InstancePtr, u32 Data) {\n\n Xil_AssertVoid(InstancePtr != NULL);\n\n bpsk_receiver_WriteReg(InstancePtr->bpsk_receiver_BaseAddress, 36, Data);\n}\nu32 bpsk_receiver_observation_point_read(bpsk_receiver *InstancePtr) {\n\n u32 Data;\n Xil_AssertVoid(InstancePtr != NULL);\n\n Data = bpsk_receiver_ReadReg(InstancePtr->bpsk_receiver_BaseAddress, 36);\n return Data;\n}\nvoid bpsk_receiver_enable_transfer_write(bpsk_receiver *InstancePtr, u32 Data) {\n\n Xil_AssertVoid(InstancePtr != NULL);\n\n bpsk_receiver_WriteReg(InstancePtr->bpsk_receiver_BaseAddress, 28, Data);\n}\nu32 bpsk_receiver_enable_transfer_read(bpsk_receiver *InstancePtr) {\n\n u32 Data;\n Xil_AssertVoid(InstancePtr != NULL);\n\n Data = bpsk_receiver_ReadReg(InstancePtr->bpsk_receiver_BaseAddress, 28);\n return Data;\n}\nvoid bpsk_receiver_coarse_passthrough_write(bpsk_receiver *InstancePtr, u32 Data) {\n\n Xil_AssertVoid(InstancePtr != NULL);\n\n bpsk_receiver_WriteReg(InstancePtr->bpsk_receiver_BaseAddress, 44, Data);\n}\nu32 bpsk_receiver_coarse_passthrough_read(bpsk_receiver *InstancePtr) {\n\n u32 Data;\n Xil_AssertVoid(InstancePtr != NULL);\n\n Data = bpsk_receiver_ReadReg(InstancePtr->bpsk_receiver_BaseAddress, 44);\n return Data;\n}\nu32 bpsk_receiver_data_count_read(bpsk_receiver *InstancePtr) {\n\n u32 Data;\n Xil_AssertVoid(InstancePtr != NULL);\n\n Data = bpsk_receiver_ReadReg(InstancePtr->bpsk_receiver_BaseAddress, 32);\n return Data;\n}\nu8 bpsk_receiver_frame_size_read(bpsk_receiver *InstancePtr) {\n\n u8 Data;\n Xil_AssertVoid(InstancePtr != NULL);\n\n Data = bpsk_receiver_ReadReg(InstancePtr->bpsk_receiver_BaseAddress, 16);\n return Data;\n}\nint bpsk_receiver_freq_offset_read(bpsk_receiver *InstancePtr) {\n\n int Data;\n Xil_AssertVoid(InstancePtr != 
NULL);\n\n Data = bpsk_receiver_ReadReg(InstancePtr->bpsk_receiver_BaseAddress, 40);\n return Data;\n}\nu32 bpsk_receiver_packet_count_read(bpsk_receiver *InstancePtr) {\n\n u32 Data;\n Xil_AssertVoid(InstancePtr != NULL);\n\n Data = bpsk_receiver_ReadReg(InstancePtr->bpsk_receiver_BaseAddress, 24);\n return Data;\n}\nu8 bpsk_receiver_packet_size_read(bpsk_receiver *InstancePtr) {\n\n u8 Data;\n Xil_AssertVoid(InstancePtr != NULL);\n\n Data = bpsk_receiver_ReadReg(InstancePtr->bpsk_receiver_BaseAddress, 20);\n return Data;\n}\n" }, { "alpha_fraction": 0.5931809544563293, "alphanum_fraction": 0.600736141204834, "avg_line_length": 34.606895446777344, "blob_id": "aeb3cdd607c9011604882e05bf1bbe23eb5aa6f4", "content_id": "ce35c0b8f46b860fbe8551085ae76882758fada8", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5162, "license_type": "permissive", "max_line_length": 125, "num_lines": 145, "path": "/rfsoc_radio/data_inspector.py", "repo_name": "mfkiwl/BPSK-Transceiver", "src_encoding": "UTF-8", "text": "__author__ = \"David Northcote\"\n__organisation__ = \"The Univeristy of Strathclyde\"\n__support__ = \"https://github.com/strath-sdr/rfsoc_radio\"\n\nfrom pynq import DefaultIP\nfrom pynq import DefaultHierarchy\nfrom pynq import allocate\nimport numpy as np\nimport math\nimport ipywidgets as ipw\nfrom .sdr_plots import TimePlot, ConstellationPlot, SpectrumAnalyser\nfrom .dma_timer import DmaTimer\n\n\nclass DataInspector(DefaultHierarchy):\n \n def __init__(self, description, plotting_rate = 0.4, autoscale = False):\n super().__init__(description)\n \n self.data_inspector_module.packetsize = 64\n self.data_inspector_module.enable = 0\n self.data_inspector_module.reset = 1\n \n self._autoscale = autoscale\n self._plotting_rate = plotting_rate\n self.buffer = allocate(shape=(int(self.data_inspector_module.packetsize*2),), dtype=np.int16)\n \n self._data = self.get_frame()\n self._t_plot = TimePlot(self._data, animation_period=0)\n self._c_plot = ConstellationPlot(self._data, animation_period=0)\n self._f_plot = SpectrumAnalyser(self._data, fs=100e3, animation_period=0)\n self._plot_controller = DmaTimer(self._update_data, self.get_frame, self._plotting_rate)\n \n def set_axisrange(self, axisrange):\n self._t_plot.set_axisrange(axisrange)\n self._c_plot.set_axisrange(axisrange)\n \n def set_frequency(self, fs):\n self._f_plot.set_frequency(fs)\n \n def set_plotting_rate(self, rate):\n self._plotting_rate = rate\n self._plot_controller.t = rate\n \n def set_shape(self, shape):\n \"\"\"Set the buffer shape by first freeing the existing buffer\n and then allocating a new buffer with the given tuple. 
Obtain the\n tuple product to set the packetsize of the data_inspector_module.\n \"\"\"\n self.buffer.freebuffer()\n lshape = list(shape)\n lshape[0] = lshape[0] * 2\n tshape = tuple(lshape)\n self.buffer = allocate(shape=tshape, dtype=np.int16) \n product = 1 \n for i in shape: \n product *= i\n self.data_inspector_module.packetsize = product\n \n def get_frame(self):\n \"\"\"Get a single buffer of time data from the logic fabric\n \"\"\"\n self.data_inspector_module.reset = 0\n self.axi_dma.recvchannel.transfer(self.buffer)\n self.data_inspector_module.enable = 1\n self.axi_dma.recvchannel.wait()\n self.data_inspector_module.enable = 0\n self.data_inspector_module.reset = 1\n t_data = np.array(self.buffer) * 2**-14\n c_data = t_data[::2] + 1j * t_data[1::2]\n if self._autoscale:\n return self._scale_data(c_data)\n else:\n return c_data\n \n def _update_data(self, data):\n \"\"\"Update the timer and constellation plots with new data\"\"\"\n self._data = data\n self._t_plot.update_data(data)\n self._c_plot.update_data(data)\n self._f_plot.update_data(data)\n \n def _scale_data(self, data):\n median = np.max(data)\n mag = abs(median)\n scale = 1/mag\n return data * scale\n \n def spectrum_plot(self):\n return self._f_plot.get_widget()\n \n def time_plot(self):\n \"\"\"Returns a time plot of inspected data\n \"\"\"\n return self._t_plot.get_widget()\n \n def constellation_plot(self):\n \"\"\"Returns a constellation plot of inspected data\n \"\"\"\n return self._c_plot.get_widget()\n \n def plot_control(self):\n \"\"\"Return the plot controller\n \"\"\"\n return self._plot_controller.get_widget()\n \n def visualise(self):\n \"\"\"Return all the available features of the data inspector module\"\"\"\n return ipw.VBox([ipw.HBox([self.time_plot(), self.constellation_plot()]), self.plot_control(), self.spectrum_plot()])\n \n @staticmethod\n def checkhierarchy(description):\n if 'axi_dma' in description['ip'] \\\n and 'data_inspector_module' in description['ip']:\n return True\n return False \n \n class DataInspectorCore(DefaultIP):\n \"\"\"Driver for Data Inspector's core logic IP\n Exposes all the configuration registers by name via data-driven properties\n \"\"\"\n \n def __init__(self, description):\n super().__init__(description=description)\n \n bindto = ['UoS:RFSoC:inspector:1.0']\n \n # LUT of property addresses for our data-driven properties\n _dataInspector_props = [(\"reset\", 0),\n (\"enable\", 4),\n (\"packetsize\", 8)]\n \n # Function to return a MMIO Getter and Setter based on a relative address\n def _create_mmio_property(addr):\n def _get(self):\n return self.read(addr)\n \n def _set(self, value):\n self.write(addr, value)\n \n return property(_get, _set)\n \n # Generate getters and setters based on _dataInspector_props\n for (name, addr) in _dataInspector_props:\n setattr(DataInspectorCore, name, _create_mmio_property(addr))" }, { "alpha_fraction": 0.5443748235702515, "alphanum_fraction": 0.545034646987915, "avg_line_length": 30.915788650512695, "blob_id": "f470af0cce450b0e16f8c3743a1d70300c7b639f", "content_id": "e110117833e209c9bf077911a09c3afc5c3cd715", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3031, "license_type": "permissive", "max_line_length": 77, "num_lines": 95, "path": "/rfsoc_radio/async_radio.py", "repo_name": "mfkiwl/BPSK-Transceiver", "src_encoding": "UTF-8", "text": "__author__ = \"David Northcote\"\n__organisation__ = \"The Univeristy of Strathclyde\"\n__support__ = 
\"https://github.com/strath-sdr/rfsoc_radio\"\n\nimport threading\nimport asyncio\nimport ipywidgets as ipw\nimport time\n\n\ndef default_callback():\n pass\n\nclass AsyncRadioRx():\n \"\"\"Class for monitoring hardware interrupts and executing radio \n data transfer functions for the receiver.\n \"\"\" \n def __init__(self, \n irq, \n irq_callback, \n func_callback = [default_callback]):\n \"\"\"Create new asynchronous radio receiver class.\n \"\"\" \n self._interrupt = irq\n self._irq_callback = irq_callback\n self._stopping = True\n self.callback = func_callback\n self.is_running = False\n \n async def _wait(self):\n await self._interrupt.wait() # Wait for IRQ rise\n self._irq_callback()\n await self._interrupt.wait() # Wait for IRQ fall\n \n def _do(self):\n while not self._stopping:\n self.is_running = True\n self._task_wait = self._loop.create_task(self._wait())\n self._loop.run_until_complete(self._task_wait)\n for i in range(len(self.callback)):\n self.callback[i]()\n \n def start(self):\n \"\"\"Start the async irq routine.\"\"\"\n if self._stopping:\n self._stopping = False\n self._loop = asyncio.get_event_loop()\n self._thread = threading.Thread(target=self._do)\n self._thread.start()\n \n def stop(self):\n self._stopping = True\n self.is_running = False\n \n def get_widget(self):\n \"\"\"Get ipywidget controls to stop and start the generator thread.\"\"\"\n return self._start_button, self._stop_button\n \nclass AsyncRadioTx():\n \"\"\"Class for executing radio data transfer functions for the transmitter.\n \"\"\" \n def __init__(self,\n rate = 1,\n callback=[default_callback],\n timer_callback=default_callback):\n \"\"\"Create new radio transmitter class\n \"\"\"\n self._timer_callback = timer_callback\n self.callback = callback\n self.is_running = False\n self._stopping = True\n self.rate = rate\n \n def _do(self):\n while not self._stopping:\n next_timer = time.time() + self.rate\n self._timer_callback()\n for i in range(len(self.callback)):\n self.callback[i]()\n sleep_time = next_timer - time.time()\n if sleep_time > 0:\n time.sleep(sleep_time)\n \n def start(self):\n if self._stopping:\n self._stopping = False\n self.is_running = True\n for i in range(len(self.callback)):\n self.callback[i]()\n self._thread = threading.Thread(target=self._do)\n self._thread.start()\n \n def stop(self):\n self._stopping = True\n self.is_running = False" } ]
21
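The driver files in the rfsoc_radio entry above expose memory-mapped registers as Python attributes by generating a `property` per register offset and attaching it to the class with `setattr`. A minimal sketch of that pattern follows; `FakeMMIOCore`, its dictionary-backed `read`/`write`, and the `_example_props` register map are illustrative assumptions so the snippet runs without PYNQ hardware.

```python
class FakeMMIOCore:
    """Stand-in for a pynq.DefaultIP subclass; read/write hit a plain dict."""
    def __init__(self):
        self._regs = {}

    def read(self, addr):
        return self._regs.get(addr, 0)

    def write(self, addr, value):
        self._regs[addr] = value


def _create_mmio_property(addr):
    # Each (name, offset) pair becomes a normal Python property on the class.
    def _get(self):
        return self.read(addr)

    def _set(self, value):
        self.write(addr, value)

    return property(_get, _set)


# Hypothetical register map in (name, byte offset) form.
_example_props = [("reset", 0), ("enable", 4), ("packetsize", 8)]

for name, addr in _example_props:
    setattr(FakeMMIOCore, name, _create_mmio_property(addr))

core = FakeMMIOCore()
core.packetsize = 64           # writes offset 8
assert core.packetsize == 64   # reads offset 8 back
```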
alexminnaar/Joint_Image_Text_Classifier
https://github.com/alexminnaar/Joint_Image_Text_Classifier
1c93593c5202b555d08155a355a0cfede8b75873
c11bbd5413ef50653c68d68d9ba78c8cdc5ae54d
c8db4d0b97eff7ee577b114fb5c17dd1d1d46d0f
refs/heads/master
2021-01-20T06:51:48.670641
2017-05-04T02:23:01
2017-05-04T02:23:01
89,937,545
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6558533310890198, "alphanum_fraction": 0.6765397191047668, "avg_line_length": 29.16312026977539, "blob_id": "75242c58bb80d09442e7fb8ff5d31be4ed8a19d0", "content_id": "ba732356e48c9f8327df0a84c760cb2b1a3da6c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4254, "license_type": "no_license", "max_line_length": 100, "num_lines": 141, "path": "/joint_classification.py", "repo_name": "alexminnaar/Joint_Image_Text_Classifier", "src_encoding": "UTF-8", "text": "import os\nfrom keras.preprocessing.text import one_hot, Tokenizer\nimport numpy as np\nfrom keras.utils import to_categorical\nfrom keras.models import Model, Sequential\nfrom keras.layers import Dense, Activation, Dropout\nfrom random import shuffle\nimport cv2\nfrom keras import applications\nfrom keras.layers import Dense, GlobalAveragePooling2D, merge, Merge, Input\nfrom keras.models import Model\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.optimizers import SGD\nfrom PIL import Image\nimport random\n\nx_train = []\ny_train = []\n\nx_train_shuf = []\ny_train_shuf = []\n\nx_train_1 = []\nx_train_2 = []\n\nmax_words = 10000\n\nepochs = 50\nbatch_size = 32\n\ntraining_dir = \"/home/aminnaar/viglink_images/combined\"\nsub_dirs = [dir for dir in os.listdir(training_dir) if \".txt\" not in dir and \".DS_Store\" not in dir]\n\nclass_counter = 0\nfor dir in sub_dirs:\n full_path = training_dir + \"/\" + dir\n text_files = [f for f in os.listdir(full_path) if \".txt\" in f]\n # print text_files\n print dir\n for tf in text_files:\n r = random.uniform(0, 1)\n if r < 0.3:\n continue\n file_root = tf.split(\".\")[0]\n image_filename = full_path + \"/\" + file_root + \".jpg\"\n # print image_filename\n image = cv2.resize(cv2.imread(image_filename), (299, 299)).astype(\n np.float32) # cv2.imread(full_path + \"/\" + file_root + \".jpg\")\n # resized = cv2.resize(image,(299,299))\n # x_train_1.append(image)\n contents = open(full_path + \"/\" + tf, \"r\").read()\n\n x_train.append((one_hot(text=contents, n=max_words, lower=True, split=\" \"), image))\n y_train.append(class_counter)\n\n class_counter += 1\n\nnum_classes = np.max(y_train) + 1\n\nindex_shuf = range(len(y_train))\nshuffle(index_shuf)\nfor i in index_shuf:\n x_train_shuf.append(x_train[i])\n y_train_shuf.append(y_train[i])\n\nx_train_text = [w[0] for w in x_train_shuf]\nx_train_image = [z[1] for z in x_train_shuf]\n# test_image_arr=np.array(x_train_image)\n#\n# blah = np.array(x_train_1)\n#\n# print \"image input array info\"\n# print blah.shape\n# print blah[0],type(blah[0])\n# print blah[0].shape\n\nprint len(x_train)\nprint len(y_train)\nprint(num_classes, 'classes')\n\nprint('Vectorizing sequence data...')\ntokenizer = Tokenizer(num_words=max_words)\nx_train_shuf = tokenizer.sequences_to_matrix(x_train_text, mode='binary')\nprint('x_train shape:', x_train_shuf.shape)\n\nprint('Convert class vector to binary class matrix '\n '(for use with categorical_crossentropy)')\ny_train_shuf = to_categorical(y_train_shuf, num_classes)\nprint('y_train shape:', y_train_shuf.shape)\n\nprint('Building model...')\n#\n# branch_1 = Sequential()\n# branch_1.add(Dense(512, input_shape=(max_words,), activation='relu'))\n\ntext_inputs = Input(shape=(max_words,))\nbranch_1 = Dense(512, activation='relu')(text_inputs)\n\n# create the base pre-trained model\nbase_model = applications.InceptionV3(weights='imagenet', include_top=False)\nfor layer in base_model.layers:\n layer.trainable = False\n\n# add a 
global spatial average pooling layer\nbranch_2 = base_model.output\nbranch_2 = GlobalAveragePooling2D()(branch_2)\n# let's add a fully-connected layer\n# branch_2 = Dense(256, activation='relu')(branch_2)\nbranch_2 = Dropout(0.5)(branch_2)\nbranch_2 = Dense(256, activation='sigmoid')(branch_2)\n\njoint = merge([branch_1, branch_2], mode='concat')\n# joint = Dense(512, activation='relu')(joint)\njoint = Dropout(0.5)(joint)\npredictions = Dense(num_classes, activation='softmax')(joint)\n\nfull_model = Model(inputs=[base_model.input, text_inputs], outputs=[predictions])\n\nfull_model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n\n# print np.array(x_train_image).shape\n# print np.array(x_train_shuf).shape\n#\n# print np.array(x_train_image)[0].shape\n# print np.array(x_train_shuf)[0].shape\n#\n# print type(x_train_image)\nx_images = np.array(x_train_image)\nx_text = np.array(x_train_shuf)\ny = np.array(y_train_shuf)\n\nfor layer in full_model.layers:\n print layer\n\nhistory = full_model.fit([x_images, x_text], y,\n epochs=epochs, batch_size=batch_size,\n verbose=1, validation_split=0.2, shuffle=True)\n\nfull_model.save('model2.h5')\n\n" } ]
1
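The joint_classification.py file in the entry above joins an InceptionV3 image branch and a bag-of-words text branch using the old Keras `merge(..., mode='concat')` call. The sketch below shows the same two-branch join against the current tf.keras functional API; the small `Conv2D` stand-in for InceptionV3, the input sizes, and the class count are illustrative assumptions, not values from the source file.

```python
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import (Conv2D, Dense, Dropout,
                                     GlobalAveragePooling2D, concatenate)

max_words, num_classes = 1000, 4

# Text branch: a dense layer over a fixed-size bag-of-words vector.
text_in = Input(shape=(max_words,))
text_branch = Dense(64, activation="relu")(text_in)

# Image branch: a tiny convolutional stand-in for the InceptionV3 base.
image_in = Input(shape=(64, 64, 3))
x = Conv2D(8, 3, activation="relu")(image_in)
image_branch = GlobalAveragePooling2D()(x)

# Concatenate both branches and classify.
joint = concatenate([text_branch, image_branch])
joint = Dropout(0.5)(joint)
out = Dense(num_classes, activation="softmax")(joint)

model = Model(inputs=[image_in, text_in], outputs=out)
model.compile(loss="categorical_crossentropy", optimizer="rmsprop",
              metrics=["accuracy"])
model.summary()
```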
juniorjrml/desafioFinalPyDevIGTI
https://github.com/juniorjrml/desafioFinalPyDevIGTI
55b7c501ba79aa10dd1d4cb53a7b3b1fca41ac92
61e52396f6d036f081ecc66f0f74c502b45a367f
08781da9611d44a2088882da2b53b175f2a6b9f9
refs/heads/master
2023-01-12T08:12:59.038898
2020-11-17T18:56:59
2020-11-17T18:56:59
313,358,988
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6837160587310791, "alphanum_fraction": 0.6962421536445618, "avg_line_length": 30.42622947692871, "blob_id": "4d77af6a45312e55be9e86f86d1bb76539dea29e", "content_id": "a22d83c729f74b9ffde328a4584f9ed3f3420852", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1932, "license_type": "no_license", "max_line_length": 89, "num_lines": 61, "path": "/app.py", "repo_name": "juniorjrml/desafioFinalPyDevIGTI", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, request, jsonify\nfrom treinamento_e_tratamento_modelo_aprendizado import trata_dataset\nimport numpy as np\nimport joblib\n\n\n\"\"\"\n0. Número de vezes em que ficou grávida.\n1. Concentração de glicose.\n2. Pressão diastólica (mm Hg).\n3. Espessura da dobra cutânea do tríceps (mm).\n4. Insulina (mu U/ml).\n5. Índice de massa corporal (peso em kg/(altura em m)^2).\n6. Histórico familiar de diabetes.\n7. Idade (anos).\n8. Classificação (0 ou 1 - 0 não diabético / 1 diabético ).\n\"\"\"\ncampos = [\"gest\", \"glic\", \"pressDiast\", \"espessura\", \"insulina\", \"imc\", \"heranca\", \"age\"]\n\ndef converter_para_tipo_do_elemento(x):\n try:\n return int(x)\n except ValueError:\n try:\n return float(x)\n except ValueError:\n return x\n\ndef gera_lista_formulario(form):\n lista_valores_formulario = []\n for campo in campos:\n print(form[campo])\n lista_valores_formulario.append(converter_para_tipo_do_elemento(form[campo]))\n print(lista_valores_formulario)\n return lista_valores_formulario\n\ndef previsao_diabetes(lista_valores_formulario):\n prever = np.array(lista_valores_formulario).reshape(1, 8)\n modelo_salvo = joblib.load(\"melhor_modelo.sav\")\n resultado = modelo_salvo.predict(prever)\n return resultado[0]\n\napp = Flask(__name__)\[email protected]('/', methods=['GET'])\ndef home():\n return render_template(\"Previsao.html\"), 200\n\n\[email protected]('/result', methods=['POST'])\ndef result():\n formulario = request.form\n lista_valores_formulario = gera_lista_formulario(formulario)\n resultado = previsao_diabetes(lista_valores_formulario)\n if resultado:\n previsao=\"Em nossa previsao o paciente possui Diabetes {}\".format(resultado)\n else:\n previsao=\"Em nossa previsao o paciente não possui Diabetes\".format(resultado)\n return render_template(\"Previsao.html\", previsao=previsao)\n\n\napp.run(port=5000,debug=True)" }, { "alpha_fraction": 0.7342589497566223, "alphanum_fraction": 0.7429770827293396, "avg_line_length": 32.66304397583008, "blob_id": "02ebdb33e4433da2b7a35e7790d9124ed9ad01e3", "content_id": "9deeaa7809f3dedd541ffdf538f4b14b2eb86b38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3099, "license_type": "no_license", "max_line_length": 111, "num_lines": 92, "path": "/treinamento_e_tratamento_modelo_aprendizado.py", "repo_name": "juniorjrml/desafioFinalPyDevIGTI", "src_encoding": "UTF-8", "text": "from sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.tree import DecisionTreeClassifier\n\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import confusion_matrix\nfrom mlxtend.plotting import plot_confusion_matrix\nfrom matplotlib import pyplot as plt\nimport pandas as pd\n\nfrom copy import copy\nimport joblib\n\n\n# normalizando\ndef normalizando(entradas):\n normaliza = MinMaxScaler() # objeto para a normalização\n 
entradas_normalizadas = normaliza.fit_transform(entradas)\n return entradas_normalizadas\n\ndef testando(modelo, X, y):\n predicao = modelo.predict(X)\n avaliacao(y,predicao)\n return modelo.score(X,y)\n\n# Avaliando o modelo\ndef avaliacao(y_test, y_pred):\n matriz_confusao = confusion_matrix(y_test, y_pred)\n fig, ax = plot_confusion_matrix(conf_mat=matriz_confusao)\n plt.show()\n\n# separando o dataset em treino e teste e normalizando\ndef trata_dataset(X, y):\n # Normalizando \\/\n entradas_normalizadas = normalizando(X)\n\n # Dividindo o dataset para treinar e testar\n X_train, x_test, y_train, y_test = train_test_split(entradas_normalizadas, y, test_size=0.30,random_state=42)\n\n return X_train, x_test, y_train, y_test\n\n# Seleciona o modelo se o score do modelo for melhor\ndef modelo_melhor_score(modelo, X, y, melhor_modelo):\n if melhor_modelo == None or modelo.score(X, y) > melhor_modelo.score(X, y):\n return modelo\n else:\n return melhor_modelo\n\n# treina diversos modelos de uma mesma classe para salvar o melhor resultado\ndef treina_melhor_modelo(modelo_cls, X, y, ciclo = 10):\n melhor_modelo = None\n for i in range(ciclo):\n modelo = copy(modelo_cls)\n X_train, x_test, y_train, y_test = trata_dataset(X, y)\n modelo.fit(X_train, y_train)\n melhor_modelo = modelo_melhor_score(modelo, X, y, melhor_modelo)\n return melhor_modelo\n\n# Escolhe o modelo em que o score e maior em uma lista de modelos\ndef escolhe_modelo_maior_score(modelos, X, y):\n melhor_modelo = None\n for modelo in modelos:\n melhor_modelo = modelo_melhor_score(modelo, X, y, melhor_modelo)\n return melhor_modelo\n\ndataset = pd.read_csv(\"pima-indians-diabetes.csv\", header=None)\nX = dataset.drop([8], axis=1) # features\ny = dataset[8] # Alvo\n\nclf_mlp = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5,10), random_state=1)\nclf_mlp = treina_melhor_modelo(clf_mlp, X, y, ciclo=100)\n\nclf_arvore = DecisionTreeClassifier(random_state=1)\nclf_arvore = treina_melhor_modelo(clf_arvore, X, y, ciclo=100)\n\nclf_KNN = KNeighborsClassifier(n_neighbors=5)\nclf_KNN = treina_melhor_modelo(clf_KNN, X, y, ciclo=100)\n\nnome_modelos = [\"Rede MLP Classificador\", \"KNN Classificador\", \"Arvore Classificador\"]\nmodelos = [clf_mlp, clf_KNN, clf_arvore]\n\n\nX_train, x_test, y_train, y_test = trata_dataset(X, y)\nmelhor_modelo = escolhe_modelo_maior_score(modelos, x_test, y_test)\nprint(melhor_modelo.score(X, y))\narquivo_saida = \"melhor_modelo.sav\"\njoblib.dump(melhor_modelo, arquivo_saida)\n\nprint(melhor_modelo.score(x_test,y_test))\nprint(melhor_modelo.__str__())\n" } ]
2
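In the entry above, app.py reloads the classifier that treinamento_e_tratamento_modelo_aprendizado.py saved with joblib and calls `predict` on a reshaped eight-feature row. A small sketch of that save, load, and predict round trip is shown below; the synthetic training data and the sample values are assumptions, while the "melhor_modelo.sav" filename and the `DecisionTreeClassifier(random_state=1)` model come from the row's sources.

```python
import joblib
import numpy as np
from sklearn.tree import DecisionTreeClassifier

# Synthetic stand-in for the eight-column Pima diabetes dataset.
X = np.random.rand(100, 8)
y = (X[:, 1] > 0.5).astype(int)

model = DecisionTreeClassifier(random_state=1).fit(X, y)
joblib.dump(model, "melhor_modelo.sav")

# Later, e.g. inside the Flask /result route:
loaded = joblib.load("melhor_modelo.sav")
sample = np.array([2, 0.9, 0.4, 0.3, 0.2, 0.6, 0.1, 0.5]).reshape(1, 8)
print(loaded.predict(sample)[0])
```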
eytansankin/new-rep
https://github.com/eytansankin/new-rep
332a4d8b0fb7274aca44c70d554e7f86ca5ae431
4a4598fa3b73e1fc4041e8787f0b271a900333ff
8bce87844a3a74a975afbfa8b28304d95add46ea
refs/heads/master
2016-03-26T13:25:12.098810
2015-04-05T08:23:30
2015-04-05T08:23:30
33,410,688
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7096773982048035, "alphanum_fraction": 0.7096773982048035, "avg_line_length": 29, "blob_id": "82ba178867a49dfec8c0ef0a0953d3ee35bbb7d3", "content_id": "91173d9a9a8e6d2132d87cfeb9b55def7cfd9f09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31, "license_type": "no_license", "max_line_length": 29, "num_lines": 1, "path": "/config_feature.py", "repo_name": "eytansankin/new-rep", "src_encoding": "UTF-8", "text": "print 'i am a config feature'\n\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.7037037014961243, "avg_line_length": 26, "blob_id": "98f56754804bea953c0172fe3ed4506901155179", "content_id": "fd07eabd9384a756230177032f0ffc1118d7d28f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 27, "license_type": "no_license", "max_line_length": 26, "num_lines": 1, "path": "/config_feature_b.py", "repo_name": "eytansankin/new-rep", "src_encoding": "UTF-8", "text": "print 'i am a 2nd feature'\n" } ]
2
phoebe-bee/py-reevoo
https://github.com/phoebe-bee/py-reevoo
987372cca99c5d3705b498d5b05cab785bd06be5
f0a30e4d1d6f7bf5cd57442d76bcd42eba3d3d87
1dfcfbf4c0b7bae778aa61df2d2288172ca44559
refs/heads/master
2021-07-14T09:46:24.809721
2017-10-10T14:08:58
2017-10-10T14:08:58
105,746,889
0
0
null
2017-10-04T08:34:19
2017-10-06T07:17:20
2017-10-10T14:08:59
Python
[ { "alpha_fraction": 0.4545454680919647, "alphanum_fraction": 0.4545454680919647, "avg_line_length": 21, "blob_id": "040da2fc412c591ab1e71f3d8aeb7b8906d796de", "content_id": "0c68e922277b0daf0a296722a12dad965520687a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22, "license_type": "no_license", "max_line_length": 21, "num_lines": 1, "path": "/__init__.py", "repo_name": "phoebe-bee/py-reevoo", "src_encoding": "UTF-8", "text": "__author__ = 'Phoebe McEwen'\n" }, { "alpha_fraction": 0.601563036441803, "alphanum_fraction": 0.6040031313896179, "avg_line_length": 46.84771728515625, "blob_id": "9ce5e92453af627405ea5d538f55833f93a988ae", "content_id": "1cd57dcd2251b79a293ca4d0a561366e2ffa6eff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 28278, "license_type": "no_license", "max_line_length": 126, "num_lines": 591, "path": "/pyreevoo.py", "repo_name": "phoebe-bee/py-reevoo", "src_encoding": "UTF-8", "text": "import json\nimport requests\nfrom operator import itemgetter\nfrom requests.auth import HTTPBasicAuth\nfrom datetime import date, datetime, timedelta\n\n\nclass ReevooAPI:\n \"\"\"\n Library to query the Reevoo API. When initialising, do NOT hard-code your API keys, make sure they are stored in an\n environment variable.\n Further documentation can be found at the GitHub repo for py-reevoo (https://github.com/phoebe-bee/py-reevoo).\n \"\"\"\n\n def __init__(self, api_key=None, api_secret=None):\n \"\"\"\n Set the API URI (constant) and set the credentials to query the API\n :param api_key:time\n :type api_key: str\n :param api_secret:\n :type api_secret: str\n \"\"\"\n self.__URI = 'https://api.reevoocloud.com'\n self.__api_key = api_key\n self.__api_secret = api_secret\n\n # create Auth object to attach to all requests made to the API\n auth = HTTPBasicAuth(self.__api_key, self.__api_secret)\n self.session = requests.Session()\n self.session.auth = auth\n\n def verify_api_keys(self):\n \"\"\"\n Returns True if API keys make a successful call, False if not. Use this after initialising to check if your API\n keys are correct and usable.\n \"\"\"\n check = self.get_organisation_list()\n # using get_organisation_list() because it only requires the API keys to make the call, no other vars\n if check.status_code == 200:\n return True\n else:\n return False\n\n def get_organisation_list(self):\n \"\"\"\n Returns a list of all organisations associated with the given API key\n \"\"\"\n path = '/v4/organisations'\n response = self.__make_request(path, 'GET')\n return response\n\n def get_organisation_detail(self, trkref, branch_code=''):\n \"\"\"\n Returns information for a specific organisation assigned to the given API key\n :param trkref: The three-character identifier for the organisation\n :type trkref: str\n :param branch_code: The identifier for a branch of the organisation (optional, defaults to None)\n :type branch_code: str\n \"\"\"\n path = '/v4/organisations/%s?branch_code=%s' % (trkref, branch_code)\n response = self.__make_request(path, 'GET')\n return response\n\n def get_reviewable_list(self, trkref, branch_code='', short_format=False, skus=None):\n \"\"\"\n Returns a list of reviewables (products) for the given organisation. 
If short_format is True, any organisation\n may request the reviewables (although short data contains only the SKU, review count and the average score).\n :param trkref: The three-character identifier for the organisation\n :type trkref: str\n :param branch_code: The identifier for a branch of the organisation (optional, defaults to None)\n :type branch_code: str\n :param short_format: Return the short format of the list (optional, defaults to False)\n :type short_format: bool\n :param skus: The list of SKUs to find (optional, max length 80, defaults to None)\n :type skus: list\n \"\"\"\n if skus:\n skus_string = ','.join(skus)\n else:\n skus_string = ''\n if short_format:\n path = '/v4/organisations/%s/reviewables?branch_code=%s&format=short' % (trkref, branch_code)\n else:\n path = '/v4/organisations/%s/reviewables?branch_code=%s&skus=%s' % (trkref, branch_code, skus_string)\n response = self.__make_request(path, 'GET')\n return response\n\n def get_reviewable_detail(self, trkref, sku='', branch_code='', locale='', short_format=False):\n \"\"\"\n Return the details of a single reviewable\n :param trkref: The three-character identifier for the organisation\n :type trkref: str\n :param branch_code: The identifier for a branch of the organisation (optional, defaults to None)\n :type branch_code: str\n :param locale: The locale (e.g. en-GB, optional, defaults to None)\n :type locale: str\n :param sku: The SKU to find\n :type sku: str\n :param short_format: Return the short format of the list (optional, defaults to False)\n :type short_format: bool\n \"\"\"\n if short_format:\n path = '/v4/organisations/%s/reviewable?branch_code=%s&locale=%s&sku=%s&format=short' % \\\n (trkref, branch_code, locale, sku)\n else:\n path = '/v4/organisations/%s/reviewable?branch_code=%s&locale=%s&sku=%s' % \\\n (trkref, branch_code, locale, sku)\n response = self.__make_request(path, 'GET')\n return response\n\n def get_review_list(self, trkref, locale, branch_code='', sku='', region='', page=1, per_page=15,\n automotive_options=None):\n \"\"\"\n Returns a list of published reviews for an organisation\n :param trkref: The three-character identifier for the organisation\n :type trkref: str\n :param locale: The locale (e.g. 
en-GB, optional, defaults to None)\n :type locale: str\n :param branch_code: The identifier for a branch of the organisation (optional, defaults to None)\n :type branch_code: str\n :param sku: The SKU to find (optional, defaults to None)\n :type sku: str\n :param region: 'my-locale', 'my-country', 'my-languages', 'english' or 'worldwide'\n :type region: str\n :param page: The index of the paginated results to return\n :type page: int\n :param per_page: The number of results to display per page\n :type per_page: int\n :param automotive_options: Options for organisations with automotive reviewables\n {\n manufacturer: str,\n model: str,\n model_variant: str (optional),\n model_year: int (optional),\n image_url: str (optional),\n body_type: str (optional),\n doors: int (optional),\n used: bool (optional),\n vehicle_type: str (optional),\n fuel_type: str (optional - 'diesel' or 'petrol')\n transmission: str (optional),\n model_display: str (optional),\n spec_description: str (optional),\n engine_size_in_liters: float (optional)\n }\n :type automotive_options: dict\n \"\"\"\n path = '/v4/organisations/%s/reviews?locale=%s&branch_code=%s&sku=%s&region=%s&page=%s&per_page=%s' % \\\n (trkref, locale, branch_code, sku, region, page, per_page)\n if automotive_options:\n auto_str = dict_to_url_args(automotive_options)\n path += '&'\n path += auto_str\n response = self.__make_request(path, 'GET')\n return response\n\n def get_review_detail(self, trkref, review_id, branch_code='', locale=''):\n \"\"\"\n Get the details for a single review\n :param trkref: The three-character identifier for the organisation\n :type trkref: str\n :param review_id: The ID of the review to fetch\n :type review_id: str\n :param branch_code: The identifier for a branch of the organisation (optional, defaults to None)\n :type branch_code: str\n :param locale: The locale (e.g. en-GB, optional, defaults to None)\n :type locale: str\n \"\"\"\n path = '/v4/reviews/%s?trkref=%s&branch_code=%s&locale=%s' % (review_id, trkref, branch_code, locale)\n response = self.__make_request(path, 'GET')\n return response\n\n def set_review_upvote_review(self, review_id, trkref=''):\n \"\"\"\n Increments the 'helpful' attribute of the review by 1\n IMPORTANT: The Reevoo API cannot detect the same user incrementing the same review repeatedly. Make sure that\n your code prevents this (http://reevoo.github.io/docs/reevooapi/review/upvote-review/).\n :param review_id: The ID of the review to fetch\n :type review_id: str\n :param trkref: The three-character identifier for the organisation\n :type trkref: str (optional, defaults to None)\n \"\"\"\n path = '/v4/reviews/%s/increment_helpful?trkref=%s' % (review_id, trkref)\n response = self.__make_request(path, 'POST')\n return response\n\n def set_review_downvote_review(self, review_id, trkref=''):\n \"\"\"\n Decrements the 'helpful' attribute of the review by 1\n IMPORTANT: The Reevoo API cannot detect the same user decrementing the same review repeatedly. 
Make sure that\n your code prevents this (http://reevoo.github.io/docs/reevooapi/review/downvote-review/).\n :param review_id: The ID of the review to fetch\n :type review_id: str\n :param trkref: The three-character identifier for the organisation\n :type trkref: str (optional, defaults to None)\n \"\"\"\n path = '/v4/reviews/%s/increment_unhelpful?trkref=%s' % (review_id, trkref)\n response = self.__make_request(path, 'POST')\n return response\n\n def get_customer_experience_review_list(self, trkref, branch_code='', older_reviews=False, page=1, per_page=15):\n \"\"\"\n Fetch a list of reviews for an organisation\n :param trkref: The three-character identifier for the organisation\n :type trkref: str\n :param branch_code: The identifier for a branch of the organisation (optional, defaults to None)\n :type branch_code: str\n :param older_reviews: Retrieves all reviews if True, otherwise retrieves only reviews within a certain window\n (optional, defaults to False)\n :type older_reviews: bool\n :param page: The page of paginated results to GET\n :type page: int\n :param per_page: The number of results to show per page (min 15, max 30)\n :type per_page: int\n \"\"\"\n older_reviews_str = 'false'\n if older_reviews:\n older_reviews_str = 'true'\n path = '/v4/organisations/%s/customer_experience_reviews?branch_code=%s&older_reviews=%s&page=%d&per_page=%d' % \\\n (trkref, branch_code, older_reviews_str, page, per_page)\n response = self.__make_request(path, 'GET')\n return response\n\n def get_customer_experience_review_detail(self, review_id, trkref='', branch_code=''):\n \"\"\"\n Fetch a single review by its ID\n :param review_id: The ID of the review to fetch\n :type review_id: str\n :param trkref: The three-character identifier for the organisation\n :type trkref: str (optional, defaults to None)\n :param branch_code: The identifier for a branch of the organisation (optional, defaults to None)\n :type branch_code: str\n \"\"\"\n path = '/v4/customer_experience_reviews/%s?trkref=%s&branch_code=%s' % (review_id, trkref, branch_code)\n response = self.__make_request(path, 'GET')\n return response\n\n def get_conversation_list(self, trkref, locale='', sku=''):\n \"\"\"\n Returns a list of conversations associated with a certain product\n :param trkref: The three-character identifier for the organisation\n :type trkref: str\n :param locale: The locale (e.g. 
en-GB, optional, defaults to None)\n :type locale: str\n :param sku: The SKU to find (optional, defaults to None)\n :type sku: str\n \"\"\"\n path = '/v4/organisations/%s/conversations?locale=%s&sku=%s' % (trkref, locale, sku)\n response = self.__make_request(path, 'GET')\n return response\n\n def get_conversation_detail(self, trkref, conversation_id):\n \"\"\"\n Returns the details for a single conversation\n :param trkref: The three-character identifier for the organisation\n :type trkref: str\n :param conversation_id: The ID of the conversation to fetch\n :type conversation_id: str\n \"\"\"\n path = '/v4/conversations/%s?trkref=%s' % (conversation_id, trkref)\n response = self.__make_request(path, 'GET')\n return response\n\n def set_conversation_create(self, trkref, conversation_data):\n \"\"\"\n Creates a new conversation question\n :param trkref: The three-character identifier for the organisation\n :type trkref: str\n :param conversation_data: The details for the question\n :type conversation_data: dict\n \"\"\"\n path = '/v4/organisations/%s/conversations' % (trkref,)\n response = self.__make_request(path, 'POST', conversation_data)\n return response\n\n def set_conversation_upvote_question(self, trkref, question_id):\n \"\"\"\n Increments the 'helpful' attribute of the question by 1\n IMPORTANT: The Reevoo API cannot detect the same user incrementing the same question repeatedly. Make sure that\n your code prevents this (http://reevoo.github.io/docs/reevooapi/review/conversation-upvote-question/).\n :param trkref: The three-character identifier for the organisation\n :type trkref: str\n :param question_id: The ID of the question\n :type question_id: str\n \"\"\"\n path = '/v4/conversations/%s/increment_helpful?trkref=%s' % (question_id, trkref)\n response = self.__make_request(path, 'POST')\n return response\n\n def set_conversation_downvote_question(self, trkref, question_id):\n \"\"\"\n Decrements the 'helpful' attribute of the question by 1\n IMPORTANT: The Reevoo API cannot detect the same user decrementing the same question repeatedly. Make sure that\n your code prevents this (http://reevoo.github.io/docs/reevooapi/review/conversation-downvote-question/).\n :param trkref: The three-character identifier for the organisation\n :type trkref: str\n :param question_id: The ID of the question\n :type question_id: str\n \"\"\"\n path = '/v4/conversations/%s/increment_unhelpful?trkref=%s' % (question_id, trkref)\n response = self.__make_request(path, 'POST')\n return response\n\n def set_conversation_upvote_answer(self, trkref, answer_id):\n \"\"\"\n Increments the 'helpful' attribute of the answer by 1\n IMPORTANT: The Reevoo API cannot detect the same user incrementing the same answer repeatedly. Make sure that\n your code prevents this (http://reevoo.github.io/docs/reevooapi/review/conversation-upvote-answer/).\n :param trkref: The three-character identifier for the organisation\n :type trkref: str\n :param answer_id: The ID of the answer\n :type answer_id: str\n \"\"\"\n path = '/v4/conversation_answers/%s/increment_helpful?trkref=%s' % (answer_id, trkref)\n response = self.__make_request(path, 'POST')\n return response\n\n def set_conversation_downvote_answer(self, trkref, answer_id):\n \"\"\"\n Decrements the 'helpful' attribute of the answer by 1\n IMPORTANT: The Reevoo API cannot detect the same user decrementing the same answer repeatedly. 
Make sure that\n your code prevents this (http://reevoo.github.io/docs/reevooapi/review/conversation-downvote-answer/).\n :param trkref: The three-character identifier for the organisation\n :type trkref: str\n :param answer_id: The ID of the answer\n :type answer_id: str\n \"\"\"\n path = '/v4/conversation_answers/%s/increment_unhelpful?trkref=%s' % (answer_id, trkref)\n response = self.__make_request(path, 'POST')\n return response\n\n def set_customer_order_single_submission(self, trkref, customer_order_data):\n \"\"\"\n Submit a dict of customer order details as a JSON object. See Reevoo documentation for fields to include -\n http://reevoo.github.io/docs/reevooapi/customer-order/customer-order-single-submission/\n :param trkref: The three-character identifier for the organisation\n :type trkref: str\n :param customer_order_data: The customer order data\n :type customer_order_data: dict\n \"\"\"\n path = '/v4/organisations/%s/customer_order' % (trkref,)\n response = self.__make_request(path, 'POST', customer_order_data)\n return response\n\n def set_customer_order_batch_submission(self, customer_order_batch_data):\n \"\"\"\n Submit a batch of customer order details as a JSON list. See Reevoo documentation for fields to include -\n http://reevoo.github.io/docs/reevooapi/customer-order/customer-order-batch-submission/\n :param customer_order_batch_data: The customer order data\n :type customer_order_batch_data: list\n \"\"\"\n path = '/v4/customer_orders'\n response = self.__make_request(path, 'POST', customer_order_batch_data)\n return response\n\n def get_purchaser_detail(self, trkref, email):\n \"\"\"\n Returns a purchaser resource identified by a customer email\n :param trkref: The three-character identifier for the organisation\n :type trkref: str\n :param email: The email of the customer\n :type email: str\n \"\"\"\n path = '/v4/organisations/%s/purchasers/%s' % (trkref, email)\n response = self.__make_request(path, 'GET')\n return response\n\n def set_purchaser_create(self, trkref, purchaser_data):\n \"\"\"\n Creates a purchaser record from a JSON string. 
See Reevoo documentation for fields to include -\n http://reevoo.github.io/docs/reevooapi/purchaser/purchaser-create/\n :param trkref: The three-character identifier for the organisation\n :type trkref: str\n :param purchaser_data: The purchaser data\n :type purchaser_data: dict\n \"\"\"\n path = '/v4/organisations/%s/purchasers' % (trkref,)\n response = self.__make_request(path, 'POST', purchaser_data)\n return response\n\n def set_purchaser_update(self, trkref, email, purchaser_data):\n \"\"\"\n Update a purchaser record using an email to identify the purchaser\n :param trkref: The three-character identifier for the organisation\n :type trkref: str\n :param email: The email of the customer\n :type email: str\n :param purchaser_data: The purchaser data\n :type purchaser_data: dict\n \"\"\"\n path = '/v4/organisations/%s/purchasers/%s' % (trkref, email)\n response = self.__make_request(path, 'POST', purchaser_data)\n return response\n\n def get_purchaser_list(self, trkref, email):\n \"\"\"\n Returns a list of all purchases made by a purchaser with a given email address\n :param trkref: The three-character identifier for the organisation\n :type trkref: str\n :param email: The email address of the purchaser\n :type email: str\n \"\"\"\n path = '/v4/organisations/%s/purchasers/%s/purchases' % (trkref, email)\n response = self.__make_request(path, 'GET')\n return response\n\n def get_purchaser_match(self, trkref, email, purchases):\n \"\"\"\n Returns a list of all purchases made by a purchaser with a given email address. The purchases will match the\n provided order references and SKUs.\n :param trkref: The three-character identifier for the organisation\n :type trkref: str\n :param email: The email address of the purchaser\n :type email: str\n :param purchases: An array of order references and SKUs to match in the format\n [{'order_ref': str, 'sku': str}, ...]\n :type purchases: list\n \"\"\"\n path = '/v4/organisations/%s/purchasers/%s/purchases/match' % (trkref, email)\n response = self.__make_request(path, 'POST', purchases)\n return response\n\n def get_questionnaire_detail(self, trkref, email, sku, order_ref, first_name='', redirect=False):\n \"\"\"\n Returns a questionnaire state or redirects to a questionnaire if redirect=True\n :param trkref: The three-character identifier for the organisation\n :type trkref: str\n :param email: The email address of the purchaser\n :type email: str\n :param sku: The SKU to find\n :type sku: str\n :param order_ref: The order reference code\n :type order_ref: str\n :param first_name: The first name of the purchaser (optional, defaults to None)\n :type first_name: str\n :param redirect: Redirects to the questionnaire if True\n :type redirect: bool\n \"\"\"\n redirect_str = 'false'\n if redirect:\n redirect_str = 'true'\n path = '/v4/organisations/%s/questionnaire?email=%s&sku=%s&order_ref=%s&first_name=%s&redirect=%s' % \\\n (trkref, email, sku, order_ref, first_name, redirect_str)\n response = self.__make_request(path, 'GET')\n return response\n\n ################################################################################################################\n #### END OF API METHODS ####\n ################################################################################################################\n\n def get_customer_experience_review_list_in_date_range(self, trkref, branch_code='', date_type='publish_date',\n start_date=None, end_date=None):\n \"\"\"\n EXPERIMENTAL - Returns a list of customer experience reviews from within a date time range. 
API does not support\n this, so depending on the size of the date range might be a bit heavy in terms of processing.\n Must provide start_date, end_date, or both, otherwise an error string will be returned instead of a list\n :param trkref:\n :type trkref: str\n :param branch_code:\n :type branch_code: str\n :param date_type: 'publish_date' | 'delivery_date' | 'purchase_date'\n :type date_type: str\n :param start_date: date string formatted YYYY-MM-DD\n :type start_date: str\n :param end_date: date string formatted YYYY-MM-DD\n :type end_date: str\n \"\"\"\n\n # find the number of pages in total\n page_one = self.get_customer_experience_review_list(trkref, branch_code, older_reviews=True, page=1,\n per_page=30)\n content = json.loads(page_one.text.replace('\\r\\n', ''))\n number_of_pages = content['summary']['pagination']['total_pages']\n\n if start_date is None and end_date is None:\n return \"Please provide at least one of: start_date, end_date. Otherwise use get_customer_experience_review_list()\"\n else:\n list_of_all_reviews_in_date = []\n if start_date:\n start_date = datetime.strptime(start_date, '%Y-%m-%d')\n if end_date:\n end_date = datetime.strptime(end_date, '%Y-%m-%d')\n if (start_date and end_date) or start_date:\n # Go through pages from beginning, add reviews within date to list, then return\n current_page = 1\n while current_page <= number_of_pages:\n page = self.get_customer_experience_review_list(trkref, branch_code, older_reviews=True,\n page=current_page, per_page=30)\n customer_experience_reviews = json.loads(page.text.replace('\\r\\n', ''))['customer_experience_reviews']\n reviews_in_date_from_page = get_items_in_date_range(customer_experience_reviews, date_type, start_date,\n end_date)\n list_of_all_reviews_in_date += reviews_in_date_from_page\n if len(reviews_in_date_from_page) < 30:\n # original request returns 30 reviews per page so if reviews_from_page has fewer than that, then\n # it has reached the end of the list of reviews in date and has therefore finished processing\n return list_of_all_reviews_in_date\n else:\n current_page += 1\n elif end_date:\n # Go through pages from end, add reviews within date to list, sort, then return\n current_page = number_of_pages\n while current_page >= 1:\n page = self.get_customer_experience_review_list(trkref, branch_code, older_reviews=True,\n page=current_page, per_page=30)\n customer_experience_reviews = json.loads(page.text.replace('\\r\\n', ''))[\n 'customer_experience_reviews']\n reviews_in_date_from_page = get_items_in_date_range(customer_experience_reviews, date_type,\n start_date, end_date)\n list_of_all_reviews_in_date += reviews_in_date_from_page\n if len(reviews_in_date_from_page) < 30:\n # original request returns 30 reviews per page so if reviews_from_page has fewer than that, then\n # it has reached the end of the list of reviews in date and has therefore finished processing\n list_of_all_reviews_in_date = sorted(list_of_all_reviews_in_date,\n key=itemgetter('publish_date'))\n return list_of_all_reviews_in_date\n else:\n current_page -= 1\n pass\n return list_of_all_reviews_in_date\n\n def __make_request(self, path, method, data=None):\n \"\"\"\n Make the request to the API, returns the response\n :param path: The URI path\n :type path: str\n :param method: GET | POST\n :type method: str\n :param data: Extra data to pass in POST requests (will be converted to JSON but should be passed as a dict)\n :return response:\n \"\"\"\n response = None\n uri_and_path = self.__URI + path\n if method == 'GET':\n response 
= self.session.get(uri_and_path)\n elif method == 'POST':\n if data:\n json_data = json.dumps(data)\n response = self.session.post(uri_and_path, json_data)\n else:\n response = self.session.post(uri_and_path)\n return response\n\n\ndef dict_to_url_args(args):\n \"\"\"\n Converts a dictionary to a string of GET arguments to be used in a URL\n :param args: The dictionary of arguments\n :type args: dict\n \"\"\"\n url_args = ''\n for key in args:\n val = args[key]\n url_args += val + '=' + str(key) + '&'\n return url_args\n\n\ndef get_items_in_date_range(list_of_items, publish_or_delivery, start_date=None, end_date=None,\n start_date_include=True, end_date_include=True):\n \"\"\"\n Checks a page of results and returns a list of those that are within a date range\n :param list_of_items: The list of items to check the dates for\n :type list_of_items: list\n :param publish_or_delivery: retrieve either the publish or the delivery date: 'publish_date' | 'delivery_date'\n :type publish_or_delivery: str\n :param start_date:\n :type start_date: datetime\n :param end_date:\n :type end_date: datetime\n :param start_date_include:\n :type start_date_include: bool\n :param end_date_include:\n :type end_date_include: bool\n \"\"\"\n list_of_items_in_date = []\n\n if start_date_include and start_date:\n start_date = start_date - timedelta(days=1)\n elif start_date is None:\n start_date = date.min\n\n if end_date_include and end_date:\n end_date = end_date - timedelta(days=1)\n elif end_date is None:\n end_date = date.max\n\n for item in list_of_items:\n item_date = datetime.strptime(item[publish_or_delivery], '%Y-%m-%d')\n if start_date < item_date < end_date:\n list_of_items_in_date.append(item)\n\n return list_of_items_in_date\n" }, { "alpha_fraction": 0.6396523714065552, "alphanum_fraction": 0.6421253681182861, "avg_line_length": 34.923858642578125, "blob_id": "fce387bec334cb36550ea29a53129ea59cc588f5", "content_id": "961f1068cbebbee46c477e13024b21679beb2d19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 14153, "license_type": "no_license", "max_line_length": 151, "num_lines": 394, "path": "/README.md", "repo_name": "phoebe-bee/py-reevoo", "src_encoding": "UTF-8", "text": "# py-reevoo\nA Python library for the Reevoo API. Made with the help of the [Reevoo API docs](http://reevoo.github.io/docs/reevooapi/)\n\n## Methods\n\n### \\_\\_init\\_\\_(api_key, api_secret)\n\nSet the credentials to query the API\n\n| Argument | Requirement | Type |\n| --- | --- | --- |\n| `api_key` | mandatory | String |\n| `api_secret` | mandatory | String |\n\n\n### get_organisation_list()\n\nReturns a list of all organisations associated with the given API key\n\n### get_organisation_detail(self, trkref, branch_code)\n\nReturns information for a specific organisation assigned to the given API key\n\n| Argument | Requirement | Type | Default |\n| --- | --- | --- | --- |\n| `trkref` | mandatory | String | |\n| `branch_code` | optional | String | `''` |\n\n### get_reviewable_list(self, trkref, branch_code, short_format, skus)\n\nReturns a list of reviewables (products) for the given organisation. 
If short_format is True, any organisation\nmay request the reviewables (although short data contains only the SKU, review count and the average score).\n\n| Argument | Requirement | Type | Default |\n| --- | --- | --- | --- |\n| `trkref` | mandatory | String | |\n| `branch_code` | optional | String | `''` |\n| `short_format` | optional | Boolean | `False` |\n| `branch_code` | optional | String | `''` |\n\n### get_reviewable_detail(self, trkref, branch_code, locale, sku, short_format)\n\nReturn the details of a single reviewable\n\n| Argument | Requirement | Type | Default |\n| --- | --- | --- | --- |\n| `trkref` | mandatory | String | |\n| `branch_code` | optional | String | `''` |\n| `locale` | optional | String | `''` |\n| `sku` | optional | String | `''` |\n| `short_format` | optional | Boolean | `False` |\n\n### get_review_list(self, trkref, locale, branch_code, sku, region, page, per_page, automotive_options)\n\nReturns a list of published reviews for an organisation.\n\n| Argument | Requirement | Type | Default |\n| --- | --- | --- | --- |\n| `trkref` | mandatory | String | |\n| `locale` | mandatory | String | |\n| `branch_code` | optional | String | `''` |\n| `sku` | optional | String | `''` |\n| `region` | optional (see Options > Region) | String | `''` |\n| `page` | optional | Integer | `1` |\n| `per_page` | optional (min 15, max 30) | Integer | `15` |\n| `automotive_options` | optional | dict | `None` |\n\n##### Options\n###### Region\nFilter reviews by region by setting the `region` parameter to one of the following strings:\n\n| Value | Description |\n| --- | --- |\n| `\"my-locale\"` | Return all reviews having same locale as locale parameter |\n| `\"my-locale\"` | Return all reviews having same country code as locale parameter |\n| `\"my-locale\"` | Return all reviews having same language code as locale parameter |\n| `\"english\"` | |\n| `\"worldwide\"` | |\n\n###### Automotive Options\nA dictionary to be used when an organisation has automotive reviewables. If the organisation does not need to use\nthese options, leave the argument blank and it will default to `None`.\n\n| Value | Requirement | Type | Examples |\n| --- | --- | --- | --- |\n| `\"manufacturer\"` | mandatory | String | `\"Reliant\"` |\n| `\"model\"` | mandatory | String | `\"Robin\"` |\n| `\"model_variant\"` | optional | String | `\"Mk1\"` |\n| `\"model_year\"` | optional | Integer | `1981` |\n| `\"image_url\"` | optional | String | |\n| `\"body_type\"` | optional | String | `\"hatchback\"`, `\"saloon\"` |\n| `\"doors\"` | optional | Integer | `3` |\n| `\"used\"` | optional | Boolean | |\n| `\"vehicle_type\"` | optional | String | `\"car\"`, `\"van\"` |\n| `\"fuel_type\"` | optional | String | `\"petrol\"`, `\"diesel\"` |\n| `\"transmission\"` | optional | String | `\"manual\"`, `\"automatic\"` |\n| `\"model_display\"` | optional | String | `\"1981 Reliant Robin Mk1\"` |\n| `\"spec_description\"` | optional | String | `\"Reliant Robin Mk1 - 3 doors\"` |\n| `\"engine_size_in_liters\"` | optional | Integer | `0.8` |\n\n\n### get_review_detail(self, trkref, review_id, branch_code, locale)\n\nGet the details for a single review\n\n| Argument | Requirement | Type | Default |\n| --- | --- | --- | --- |\n| `trkref` | mandatory | String | |\n| `review_id` | optional | String | `False` |\n| `branch_code` | optional | String | `''` |\n| `locale` | optional | String | `''` |\n\n### set_review_upvote_review(self, review_id, trkref)\n\nIncrements the 'helpful' attribute of the review by 1. 
[IMPORTANT: The Reevoo API cannot detect the same user\nincrementing the same review repeatedly.](http://reevoo.github.io/docs/reevooapi/review/upvote-review/)\nMake sure your code prevents this.\n\n| Argument | Requirement | Type | Default |\n| --- | --- | --- | --- |\n| `review_id` | mandatory | String | |\n| `trkref` | optional | String | `''` |\n\n### set_review_downvote_review(self, review_id, trkref)\n\nDecrements the 'helpful' attribute of the review by 1. [IMPORTANT: The Reevoo API cannot detect the same user\ndecrementing the same review repeatedly.](http://reevoo.github.io/docs/reevooapi/review/downvote-review/)\nMake sure your code prevents this.\n\n| Argument | Requirement | Type | Default |\n| --- | --- | --- | --- |\n| `review_id` | mandatory | String | |\n| `trkref` | optional | String | `''` |\n\n### get_customer_experience_review_list(self, trkref, branch_code, older_reviews)\n\nFetch a list of reviews for an organisation\n\n| Argument | Requirement | Type | Default |\n| --- | --- | --- | --- |\n| `trkref` | mandatory | String | |\n| `branch_code` | optional | String | `''` |\n| `older_reviews` | optional | Boolean | `False` |\n| `page` | optional | Integer | `1` |\n| `per_page` | optional | Integer | `15` |\n\n### get_customer_experience_review_detail(self, review_id, trkref, branch_code)\n\nFetch a single review by its ID\n\n| Argument | Requirement | Type | Default |\n| --- | --- | --- | --- |\n| `review_id` | mandatory | String | |\n| `trkref` | optional | String | `''` |\n| `branch_code` | optional | String | `''` |\n\n### get_conversation_list(self, trkref, locale, sku)\n\nReturns a list of conversations associated with a certain product\n\n| Argument | Requirement | Type | Default |\n| --- | --- | --- | --- |\n| `trkref` | mandatory | String | |\n| `locale` | optional | String | `''` |\n| `sku` | optional | String | `''` |\n\n### get_conversation_detail(self, trkref, conversation_id)\n\nReturns the details for a single conversation\n\n| Argument | Requirement | Type |\n| --- | --- | --- |\n| `trkref` | mandatory | String |\n| `conversation_id` | mandatory | String |\n\n### set_conversation_create(self, trkref, conversation_data)\n\nCreate a new conversation question\n\n| Argument | Requirement | Type |\n| --- | --- | --- |\n| `trkref` | mandatory | String |\n| `conversation_data` | mandatory | dict |\n\n###### conversation_data\nThe dict should contain the following data.\n\n| Argument | Requirement | Type |\n| --- | --- | --- |\n| `sku` | mandatory | String |\n| `first_name` | mandatory | String |\n| `email` | mandatory | String |\n| `question` | mandatory | String |\n\n### set_conversation_upvote_question(self, trkref, question_id)\n\nIncrements the 'helpful' attribute of the question by 1. [IMPORTANT: The Reevoo API cannot detect the same user\nincrementing the same question repeatedly.](http://reevoo.github.io/docs/reevooapi/conversation/conversation-upvote-question/)\nMake sure your code prevents this.\n\n| Argument | Requirement | Type |\n| --- | --- | --- |\n| `trkref` | mandatory | String |\n| `question_id` | mandatory | String |\n\n### set_conversation_downvote_question(self, trkref, question_id)\n\nDecrements the 'helpful' attribute of the question by 1. 
[IMPORTANT: The Reevoo API cannot detect the same user\ndecrementing the same question repeatedly.](http://reevoo.github.io/docs/reevooapi/conversation/conversation-downvote-question/)\nMake sure your code prevents this.\n\n| Argument | Requirement | Type |\n| --- | --- | --- |\n| `trkref` | mandatory | String |\n| `question_id` | mandatory | String |\n\n### set_conversation_upvote_answer(self, trkref, answer_id)\n\nIncrements the 'helpful' attribute of the answer by 1. [IMPORTANT: The Reevoo API cannot detect the same user\nincrementing the same answer repeatedly.](http://reevoo.github.io/docs/reevooapi/conversation/conversation-upvote-answer/)\nMake sure your code prevents this.\n\n| Argument | Requirement | Type |\n| --- | --- | --- |\n| `trkref` | mandatory | String |\n| `answer_id` | mandatory | String |\n\n### set_conversation_downvote_answer(self, trkref, answer_id)\n\nDecrements the 'helpful' attribute of the answer by 1. [IMPORTANT: The Reevoo API cannot detect the same user\ndecrementing the same answer repeatedly.](http://reevoo.github.io/docs/reevooapi/conversation/conversation-downvote-answer/)\nMake sure your code prevents this.\n\n| Argument | Requirement | Type |\n| --- | --- | --- |\n| `trkref` | mandatory | String |\n| `answer_id` | mandatory | String |\n\n### set_customer_order_single_submission(self, trkref, customer_order_data)\n\nSubmit customer order details as a JSON object.\n\n| Argument | Requirement | Type |\n| --- | --- | --- |\n| `trkref` | mandatory | String |\n| `customer_order_data` | mandatory | dict |\n\n###### customer_order_data\nThe dict should contain the following data.\n\n| Argument | Requirement | Type | Default |\n| --- | --- | --- | --- |\n| `trkref` | mandatory | String | |\n| `order_ref` | mandatory | String | |\n| `order_date` | optional | String | `None` |\n| `fulfilment_date` | optional | String | `None` |\n| `language` | optional | String | `None` |\n| `locale` | optional | String | `None` |\n| `customer` | mandatory | String | |\n| ...`email` | mandatory | String | |\n| ...`customer_ref` | optional | String | `None` |\n| ...`title` | optional | String | `None` |\n| ...`first_name` | optional | String | `None` |\n| ...`surname` | optional | String | `None` |\n| ...`postcode` | optional | String | `None` |\n| ...`country` | optional | String | `None` |\n| `order_items` | mandatory | Array | |\n| ...`sku` | mandatory | String | |\n| ...`price` | optional | String | `None` |\n| ...`currency` | optional - Use ISO4217 code | String | `None` |\n| ...`metadata` | optional | String | `None` |\n| ......`key_with_underscore` | optional | String | `None` |\n\n### set_customer_order_batch_submission(self, customer_order_batch_data)\n\nSubmit a batch of customer order details as a JSON array.\n\n| Argument | Requirement | Type |\n| --- | --- | --- |\n| `customer_order_batch_data` | mandatory | Array |\n\n###### customer_order_batch_data\nThe argument should be a list of dicts as specified in `set_customer_order_submission()` | `customer_order_data`\n\n### get_purchaser_detail(self, trkref, email)\n\nReturns a purchaser resource identified by a customer email.\n\n| Argument | Requirement | Type |\n| --- | --- | --- |\n| `trkref` | mandatory | String |\n| `email` | mandatory | String |\n\n### set_purchaser_create(self, trkref, purchaser_data)\n\nCreates a purchaser record from a JSON string.\n\n| Argument | Requirement | Type |\n| --- | --- | --- |\n| `trkref` | mandatory | String |\n| `purchaser_data` | mandatory | String |\n\n###### purchaser_data\nThe 
dict should contain the following data.\n\n| Argument | Requirement | Type | Default |\n| --- | --- | --- | --- |\n| `email` | mandatory | String | |\n| `title` | optional | String | `None` |\n| `first_name` | optional | String | `None` |\n| `surname` | optional | String | `None` |\n| `country` | optional | String | `None` |\n| `postcode` | optional | String | `None` |\n\n### set_purchaser_update(self, trkref, email, purchaser_data)\n\nUpdate a purchaser record using an email to identify the purchaser\n\n| Argument | Requirement | Type |\n| --- | --- | --- |\n| `trkref` | mandatory | String |\n| `purchaser_data` | mandatory | String |\n\n###### purchaser_data\nThe dict should contain the following data.\n\n| Argument | Requirement | Type | Default |\n| --- | --- | --- | --- |\n| `email` | mandatory | String | |\n| `title` | optional | String | `None` |\n| `first_name` | optional | String | `None` |\n| `surname` | optional | String | `None` |\n| `country` | optional | String | `None` |\n| `postcode` | optional | String | `None` |\n\n### get_purchaser_list(self, trkref, email)\n\nReturns a list of all purchases made by a purchaser with a given email address\n\n| Argument | Requirement | Type |\n| --- | --- | --- |\n| `trkref` | mandatory | String |\n| `email` | mandatory | String |\n\n### get_purchaser_match(self, trkref, email, purchases)\n\nReturns a list of all purchases made by a purchaser with a given email address. The purchases will match the\nprovided order references and SKUs.\n\n| Argument | Requirement | Type |\n| --- | --- | --- |\n| `trkref` | mandatory | String |\n| `email` | mandatory | String |\n| `purchases` | mandatory | Array |\n\n###### purchases\nThe array should contain objects with the following information:\n\n| Argument | Requirement | Type |\n| --- | --- | --- |\n| `order_ref` | mandatory | String |\n| `sku` | mandatory | String |\n\n### get_questionnaire_detail(self, trkref, email, sku, order_ref, first_name, redirect)\n\nReturns a questionnaire state or redirects to a questionnaire if redirect=True\n\n| Argument | Requirement | Type | Default |\n| --- | --- | --- | --- |\n| `trkref` | mandatory | String | |\n| `email` | mandatory | String | |\n| `sku` | mandatory | String | |\n| `order_ref` | mandatory | String | |\n| `first_name` | optional | String | `''` |\n| `redirect` | optional | Boolean | `False` |\n\n### get_customer_experience_review_list_in_date_range(self, trkref, branch_code, date_type, start_date, end_date, include_start_date, include_end_date)\n\nReturns a list of customer experience reviews from within a date range.\n\nExperimental - this is not a function supported by the API, so may be a bit heavy in terms of processing in your application.\nMust provide at least one of `start_date` and `end_date` otherwise an error message will be returned.\n\n`publish_date` should be one of the following: `'publish_date`, `'delivery_date`, `'purchase_date`.\n\n| Argument | Requirement | Type | Default |\n| --- | --- | --- | --- |\n| `trkref` | mandatory | String | |\n| `branch_code` | optional | String | `''` |\n| `date_type` | optional | String | `'publish_date'` |\n| `start_date` | optional | String | `None |\n| `end_date` | optional | String | `None` |\n| `include_start_date` | optional | Boolean | `True` |\n| `include_end_date` | optional | Boolean | `True` |" }, { "alpha_fraction": 0.6099942922592163, "alphanum_fraction": 0.6202157735824585, "avg_line_length": 50.34110641479492, "blob_id": "a326d6d2d2df3539f957d01715194e5c9f32e1c7", "content_id": 
"75c3e46b1af61321ea8c5a104424e07479d63b9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17610, "license_type": "no_license", "max_line_length": 143, "num_lines": 343, "path": "/test.py", "repo_name": "phoebe-bee/py-reevoo", "src_encoding": "UTF-8", "text": "import json\nimport unittest\n\nfrom pyreevoo import ReevooAPI\nfrom os import environ\n\n\"\"\"\nTest suite for py-reevoo.\nTo test, set the following environment variables:\nAPI_KEY\nAPI_SECRET\nTRKREF\nLOCALE\nSKU (choose a SKU you know is in the dataset)\nREVIEW_ID (choose a review ID you know is in the dataset)\nCONVERSATION_ID\nQUESTION_ID\nANSWER_ID\nEMAIL\n\nYou will also need to set some dummy data for a test conversation in\n\"\"\"\n\n\nclass Test(unittest.TestCase):\n def test_verify_api_keys(self):\n \"\"\"\n Test the function that verifies the API keys works. Should return True if valid API keys are provided.\n \"\"\"\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n api_keys_valid = reevoo.verify_api_keys()\n self.assertEqual(api_keys_valid, True)\n\n def test_verify_wrong_api_keys(self):\n \"\"\"\n Test the function that verifies the API keys works. Uses invalid API keys. Should return False.\n \"\"\"\n reevoo = ReevooAPI('QWERTYUIOP', 'ASDFGHJKL')\n api_keys_valid = reevoo.verify_api_keys()\n self.assertEqual(api_keys_valid, False)\n\n def test_get_organisation_list(self):\n \"\"\"\n Test the function that gets a list of organisations associated with a TRKREF. Should return response code 200.\n \"\"\"\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.get_organisation_list()\n self.assertEqual(response.status_code, 200, 'test_get_organisation_list failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_get_organisation_detail(self):\n \"\"\"\n Test the function that gets detailed info of an organisation from a TRKREF. Should return response code 200.\n \"\"\"\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.get_organisation_detail(environ.get('TRKREF'))\n self.assertEqual(response.status_code, 200, 'test_get_organisation_detail failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_get_reviewable_list(self):\n \"\"\"\n Test the function that gets a list of reviewables (products). Should return status code 200.\n \"\"\"\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.get_reviewable_list(environ.get('TRKREF'))\n self.assertEqual(response.status_code, 200, 'test_get_reviewable_list failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_get_reviewable_list_short_format(self):\n \"\"\"\n Test the function that gets a list of reviewables (products). Should return status code 200.\n \"\"\"\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.get_reviewable_list(environ.get('TRKREF'), short_format=True)\n content = json.loads(response.content)\n self.assertEqual(len(content['reviewables'][0]), 3,\n 'test_get_reviewable_list_short_format failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_get_reviewable_detail(self):\n \"\"\"\n Test the function that gets the detailed information for a reviewable (product). 
Should return status code 200.\n \"\"\"\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.get_reviewable_detail(environ.get('TRKREF'), environ.get('SKU'))\n self.assertEqual(response.status_code, 200, 'test_get_reviewable_detail failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_get_reviewable_detail_short_format(self):\n \"\"\"\n Test the function that gets the detailed information for a reviewable (product). Should return status code 200.\n \"\"\"\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.get_reviewable_detail(environ.get('TRKREF'), environ.get('SKU'), short_format=True)\n self.assertEqual(response.status_code, 200,\n 'test_get_reviewable_detail_short_format failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_get_review_list(self):\n \"\"\"\n Test the function that gets the list of reviews for a TRKREF. Should return status code 200.\n \"\"\"\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.get_review_list(environ.get('TRKREF'), environ.get('LOCALE'), sku=environ.get('SKU'))\n self.assertEqual(response.status_code, 200, 'test_get_review_list failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_get_review_detail(self):\n \"\"\"\n Test the function that gets the detail of a review from its ID. Should return status code 200.\n \"\"\"\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.get_review_detail(environ.get('TRKREF'), environ.get('REVIEW_ID'))\n self.assertEqual(response.status_code, 200, 'test_get_review_detail failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_set_review_upvote_review(self):\n \"\"\"\n Test the function that upvotes a review. Should return status code 202.\n \"\"\"\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.set_review_upvote_review(environ.get('REVIEW_ID'), environ.get('TRKREF'))\n self.assertEqual(response.status_code, 202, 'test_set_review_upvote_review failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_set_review_downvote_review(self):\n \"\"\"\n Test the function that downvotes a review. Should return status code 202.\n \"\"\"\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.set_review_downvote_review(environ.get('REVIEW_ID'), environ.get('TRKREF'))\n self.assertEqual(response.status_code, 202, 'test_set_review_downvote_review failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_get_customer_experience_review_list(self):\n \"\"\"\n Test the function that gets a list of customer experience reviews. Should return status code 200.\n \"\"\"\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.get_customer_experience_review_list(environ.get('TRKREF'))\n self.assertEqual(response.status_code, 200,\n 'test_get_customer_experience_review_list failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_get_customer_experience_review_detail(self):\n \"\"\"\n Test the function that gets a detailed customer experience review from its ID. 
Should return status code 200.\n \"\"\"\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.get_customer_experience_review_detail(environ.get('REVIEW_ID'))\n self.assertEqual(response.status_code, 200,\n 'test_get_customer_experience_review_detail failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_get_conversation_list(self):\n \"\"\"\n Test the function that gets a list of conversations. Should return status code 200.\n \"\"\"\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.get_conversation_list(environ.get('TRKREF'), locale=environ.get('LOCALE'), sku=environ.get('SKU'))\n self.assertEqual(response.status_code, 200,\n 'test_get_conversation_list failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_get_conversation_detail(self):\n \"\"\"\n Test the function that gets a detailed conversation from its ID. Should return status code 200.\n \"\"\"\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.get_conversation_detail(environ.get('TRKREF'), environ.get('CONVERSATION_ID'))\n self.assertEqual(response.status_code, 200,\n 'test_get_conversation_list failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_set_conversation_create(self):\n \"\"\"\n Test the function that creates a new conversation. Should return status code 202.\n \"\"\"\n dummy_conversation = {\n\n }\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.set_conversation_create(environ.get('TRKREF'), dummy_conversation)\n self.assertEqual(response.status_code, 202,\n 'test_get_conversation_list failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_set_conversation_upvote_question(self):\n \"\"\"\n Test the function that upvotes a question by ID. Should return status code 202.\n \"\"\"\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.set_conversation_upvote_question(environ.get('TRKREF'), environ.get('QUESTION_ID'))\n self.assertEqual(response.status_code, 202,\n 'test_set_conversation_upvote_question failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_set_conversation_downvote_question(self):\n \"\"\"\n Test the function that downvotes a question by ID. Should return status code 202.\n \"\"\"\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.set_conversation_downvote_question(environ.get('TRKREF'), environ.get('QUESTION_ID'))\n self.assertEqual(response.status_code, 202,\n 'test_set_conversation_downvote_question failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_set_conversation_upvote_answer(self):\n \"\"\"\n Test the function that upvotes an answer by ID. Should return status code 202.\n \"\"\"\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.set_conversation_upvote_answer(environ.get('TRKREF'), environ.get('ANSWER_ID'))\n self.assertEqual(response.status_code, 202,\n 'test_set_conversation_upvote_answer failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_set_conversation_downvote_answer(self):\n \"\"\"\n Test the function that downvotes an answer by ID. 
Should return status code 202.\n \"\"\"\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.set_conversation_downvote_answer(environ.get('TRKREF'), environ.get('ANSWER_ID'))\n self.assertEqual(response.status_code, 202,\n 'test_set_conversation_downvote_answer failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_set_customer_order_single_submission(self):\n \"\"\"\n Test the function that submits the details of a single customer order. Should return status code 202.\n \"\"\"\n dummy_order = {\n\n }\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.set_customer_order_single_submission(environ.get('TRKREF'), dummy_order)\n self.assertEqual(response.status_code, 202,\n 'test_set_customer_order_single_submission failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_set_customer_order_batch_submission(self):\n \"\"\"\n Test the function that submits the details of a batch of customer orders. Should return status code 202 or 206.\n \"\"\"\n dummy_order_batch = [\n\n ]\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.set_customer_order_batch_submission(dummy_order_batch)\n self.assertIn(response.status_code, [202, 206],\n 'test_set_customer_order_batch_submission failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_get_purchaser_detail(self):\n \"\"\"\n Test the function that gets the details of a purchaser. Should return status code 202.\n \"\"\"\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.get_purchaser_detail(environ.get('TRKREF'), environ.get('EMAIL'))\n self.assertEqual(response.status_code, 200,\n 'test_get_purchaser_detail failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_set_purchaser_create(self):\n \"\"\"\n Test the function that creates a new purchaser. Should return status code 202.\n \"\"\"\n purchaser_data = {\n\n }\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.set_purchaser_create(environ.get('TRKREF'), purchaser_data)\n self.assertEqual(response.status_code, 202,\n 'test_get_purchaser_detail failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_set_purchaser_update(self):\n \"\"\"\n Test the function that updates an existing purchaser's details. Should return status code 202.\n \"\"\"\n purchaser_data = {\n\n }\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.set_purchaser_update(environ.get('TRKREF'), environ.get('EMAIL'), purchaser_data)\n self.assertEqual(response.status_code, 202,\n 'test_set_purchaser_create failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_get_purchaser_list(self):\n \"\"\"\n Test the function that gets a list of purchases associated with an email address. Should return status code 200.\n \"\"\"\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.get_purchaser_list(environ.get('TRKREF'), environ.get('EMAIL'))\n self.assertEqual(response.status_code, 200,\n 'test_get_purchaser_list failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_get_purchaser_match(self):\n \"\"\"\n Test the function that gets the purchases associated with the email address, skus and order references. 
Should\n return status code 200.\n \"\"\"\n purchases = [\n\n ]\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.get_purchaser_match(environ.get('TRKREF'), environ.get('EMAIL'), purchases)\n self.assertEqual(response.status_code, 200,\n 'test_get_purchaser_match failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_get_questionnaire_detail(self):\n \"\"\"\n Test the function that gets detailed information for a questionnaire.\n \"\"\"\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.get_questionnaire_detail(environ.get('TRKREF'), environ.get('EMAIL'), environ.get('SKU'),\n environ.get('ORDER_REF'))\n self.assertEqual(response.status_code, 200,\n 'test_set_conversation_downvote_answer failed - Response code %d, %s'\n % (response.status_code, response.reason))\n\n def test_get_customer_experience_review_list_in_date_range_no_dates(self):\n \"\"\"\n Test that the function which returns customer experience reviews within a date range throws an error when given\n no dates. Should return an error string.\n \"\"\"\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n response = reevoo.get_customer_experience_review_list_in_date_range(environ.get('TRKREF'))\n self.assertEqual(response, \"Please provide at least one of: start_date, end_date. Otherwise use get_customer_experience_review_list()\")\n\n def test_get_customer_experience_review_list_in_date_range(self):\n \"\"\"\n Test the function that gets a list of customer experience reviews from within a date range.\n Should return a list.\n \"\"\"\n reevoo = ReevooAPI(environ.get('API_KEY'), environ.get('API_SECRET'))\n list_in_date_range = reevoo.get_customer_experience_review_list_in_date_range(environ.get('TRKREF'),\n start_date='2016-01-01',\n end_date='2017-03-31')\n self.assertIsInstance(list_in_date_range, list)\n" } ]
4
HumanAcademy-AI-Cource/16Sample
https://github.com/HumanAcademy-AI-Cource/16Sample
65a91496e245fc96a0570c10689d9cffb787f0a3
ace875ca7dac2903f2fc706559a2b44b8040358d
1bac3c7757397466e4345c7d42d4872c890731ea
refs/heads/main
2023-04-29T04:56:22.802870
2021-05-15T11:29:19
2021-05-15T11:29:19
301,574,345
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6657142639160156, "alphanum_fraction": 0.6899999976158142, "avg_line_length": 24, "blob_id": "516242aefcaefefba88f2703c1b335e0490b0054", "content_id": "aff427a72163621eef118d2abda777488c4d4c70", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1212, "license_type": "permissive", "max_line_length": 70, "num_lines": 28, "path": "/README.md", "repo_name": "HumanAcademy-AI-Cource/16Sample", "src_encoding": "UTF-8", "text": "このプログラムはヒューマンアカデミー株式会社のAI入門講座で使用するプログラムです。\n\n# 16Sample\n\n### ディレクトリ構造\n\n* aws\n * translate1.py\n * プログラム内に書かれている文書を翻訳\n * translate2.py\n * translate2.txtに書かれている英語の文章を日本語に翻訳\n * translate2.txt\n * translate2.pyで使う英語の文章が書かれたテキストファイル\n * translate3.py\n * 英語の文章を入力し日本語に翻訳\n * translate4.py\n * translate4.txtに書かれている日本語の文章を様々な言語に翻訳\n * translate4.txt\n * translate4.pyで使う日本語の文章が書かれたテキストファイル\n * translate5.py\n * translate5.txtに書かれている日本語の文章を連鎖的に様々な言語へ翻訳し、途中経過として日本語への再翻訳を行う\n * translate5.txt\n * translate5.pyで使う日本語の文章が書かれたテキストファイル\n* original_translate\n * dictionary.csv\n * 翻訳に使う辞書データ\n * translate0.py\n * オリジナルの翻訳プログラム\n" }, { "alpha_fraction": 0.699999988079071, "alphanum_fraction": 0.7214285731315613, "avg_line_length": 10.75, "blob_id": "3094b781ee05326790cfbfc282e3668f435ffdbd", "content_id": "b376724ec9efdd857a44813328d08d6b400ea23c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 268, "license_type": "permissive", "max_line_length": 35, "num_lines": 12, "path": "/original_translate/README.md", "repo_name": "HumanAcademy-AI-Cource/16Sample", "src_encoding": "UTF-8", "text": "# 16Sample\n\n## オリジナルの翻訳プログラム\n\n### 準備\n翻訳に使う辞書データを`dictionary.csv`に書く\n\n### 実行\n```\n# プログラム内に書かれている文書を翻訳(オリジナルの翻訳プログラム)\npython translate0.py\n```" }, { "alpha_fraction": 0.50348299741745, "alphanum_fraction": 0.5054179430007935, "avg_line_length": 25.91666603088379, "blob_id": "2f51e33a9a2d233076d7e27f50120d38a1ac8071", "content_id": "f7cb273550f1c6f81b2da6500166f7f16f57106b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3316, "license_type": "permissive", "max_line_length": 50, "num_lines": 96, "path": "/aws/translate5.py", "repo_name": "HumanAcademy-AI-Cource/16Sample", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# ライブラリのインポート\nimport boto3\n\n\n# テキストファイルから文章を読み出す\nwith open(\"./translate5.txt\") as f:\n input_text = f.read()\n\n# 翻訳前の文を表示\nprint(\"------------------------------------\")\nprint(\"○ 翻訳前:\\n{}\".format(input_text))\nprint(\"------------------------------------\")\n\n# AWSを使った翻訳の準備\ntranslate = boto3.client(service_name=\"translate\")\n\n############################################\n\n\ndef honyaku(text, source, target):\n \"\"\"\n AWSを使って簡単に翻訳できるようにした関数\n \"\"\"\n\n result = translate.translate_text(\n Text=text,\n SourceLanguageCode=source,\n TargetLanguageCode=target\n )[\"TranslatedText\"].encode(\"UTF-8\")\n return result\n\n#############################################\n\n\n# 日本語から英語へ翻訳\ntranslate_en = honyaku(input_text, \"ja\", \"en\")\n# 英語への翻訳結果から日本語に翻訳\ntranslate_ja = honyaku(translate_en, \"en\", \"ja\")\nprint(\"○ 英語から日本語:\\n{}\".format(translate_ja))\nprint(\"------------------------------------\")\n\n#############################################\n\n# 英語からスペイン語に翻訳\ntranslate_es = honyaku(translate_en, \"en\", \"es\")\n# スペイン語への翻訳結果から日本語に翻訳\ntranslate_ja = honyaku(translate_es, 
\"es\", \"ja\")\nprint(\"○ スペイン語から日本語:\\n{}\".format(translate_ja))\nprint(\"------------------------------------\")\n\n#############################################\n\n# スペイン語からフィンランド語に翻訳\ntranslate_fi = honyaku(translate_es, \"es\", \"fi\")\n# フィンランド語への翻訳結果から日本語に翻訳\ntranslate_ja = honyaku(translate_fi, \"fi\", \"ja\")\nprint(\"○ フィンランド語から日本語:\\n{}\".format(translate_ja))\nprint(\"------------------------------------\")\n\n#############################################\n\n# フィンランド語からイタリア語に翻訳\ntranslate_it = honyaku(translate_fi, \"fi\", \"it\")\n# イタリア語への翻訳結果から日本語に翻訳\ntranslate_ja = honyaku(translate_it, \"it\", \"ja\")\nprint(\"○ イタリア語から日本語:\\n{}\".format(translate_ja))\nprint(\"------------------------------------\")\n\n#############################################\n\n# イタリア語からドイツ語に翻訳\ntranslate_de = honyaku(translate_it, \"it\", \"de\")\n# ドイツ語への翻訳結果から日本語に翻訳\ntranslate_ja = honyaku(translate_de, \"de\", \"ja\")\nprint(\"○ ドイツ語から日本語:\\n{}\".format(translate_ja))\nprint(\"------------------------------------\")\n\n#############################################\n\n# ドイツ語からチェコ語に翻訳\ntranslate_cs = honyaku(translate_de, \"de\", \"cs\")\n# チェコ語への翻訳結果から日本語に翻訳\ntranslate_ja = honyaku(translate_cs, \"cs\", \"ja\")\nprint(\"○ チェコ語から日本語:\\n{}\".format(translate_ja))\nprint(\"------------------------------------\")\n\n#############################################\n\n# チェコ語からロシア語に翻訳\ntranslate_ru = honyaku(translate_cs, \"cs\", \"ru\")\n# ロシア語への翻訳結果から日本語に翻訳\ntranslate_ja = honyaku(translate_ru, \"ru\", \"ja\")\nprint(\"○ ロシア語から日本語:\\n{}\".format(translate_ja))\n" }, { "alpha_fraction": 0.5066870450973511, "alphanum_fraction": 0.5108903050422668, "avg_line_length": 24.656862258911133, "blob_id": "c487fab145cfc9ba84a416289afe6d375c3dbe7f", "content_id": "1212a854d9d08f5125909b9e353f4cf39af43c18", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3107, "license_type": "permissive", "max_line_length": 50, "num_lines": 102, "path": "/aws/translate4.py", "repo_name": "HumanAcademy-AI-Cource/16Sample", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# ライブラリのインポート\nimport boto3\n\n\n# テキストファイルから文章を読み出す\nwith open(\"./translate4.txt\") as f:\n input_text = f.read()\n\n# 翻訳前の文章を表示\nprint(\"------------------------------------\")\nprint(\"○ 翻訳前: \\n{}\".format(input_text))\nprint(\"------------------------------------\")\n\n# AWSを使った翻訳の準備\ntranslate = boto3.client(service_name=\"translate\")\n\n#############################################\n\n# 日本語から英語へ翻訳\ntranslate_en = translate.translate_text(\n Text=input_text,\n SourceLanguageCode=\"ja\",\n TargetLanguageCode=\"en\"\n)[\"TranslatedText\"].encode(\"UTF-8\")\n# 結果を表示\nprint(\"○ 翻訳後(英語):\\n{}\".format(translate_en))\nprint(\"------------------------------------\")\n\n#############################################\n\n# 日本語からスペイン語へ翻訳\ntranslate_es = translate.translate_text(\n Text=input_text,\n SourceLanguageCode=\"ja\",\n TargetLanguageCode=\"es\"\n)[\"TranslatedText\"].encode(\"UTF-8\")\n# 結果を表示\nprint(\"○ 翻訳後(スペイン語):\\n{}\".format(translate_es))\nprint(\"------------------------------------\")\n\n#############################################\n\n# 日本語からフィンランド語へ翻訳\ntranslate_fi = translate.translate_text(\n Text=input_text,\n SourceLanguageCode=\"ja\",\n TargetLanguageCode=\"fi\"\n)[\"TranslatedText\"].encode(\"UTF-8\")\n# 結果を表示\nprint(\"○ 
翻訳後(フィンランド語):\\n{}\".format(translate_fi))\nprint(\"------------------------------------\")\n\n#############################################\n\n# 日本語からイタリア語へ翻訳\ntranslate_it = translate.translate_text(\n Text=input_text,\n SourceLanguageCode=\"ja\",\n TargetLanguageCode=\"it\"\n)[\"TranslatedText\"].encode(\"UTF-8\")\n# 結果を表示\nprint(\"○ 翻訳後(イタリア語):\\n{}\".format(translate_it))\nprint(\"------------------------------------\")\n\n#############################################\n\n# 日本語からドイツ語へ翻訳\ntranslate_de = translate.translate_text(\n Text=input_text,\n SourceLanguageCode=\"ja\",\n TargetLanguageCode=\"de\"\n)[\"TranslatedText\"].encode(\"UTF-8\")\n# 結果を表示\nprint(\"○ 翻訳後(ドイツ語):\\n{}\".format(translate_de))\nprint(\"------------------------------------\")\n\n#############################################\n\n# 日本語からチェコ語へ翻訳\ntranslate_cs = translate.translate_text(\n Text=input_text,\n SourceLanguageCode=\"ja\",\n TargetLanguageCode=\"cs\"\n)[\"TranslatedText\"].encode(\"UTF-8\")\n# 結果を表示\nprint(\"○ 翻訳後(チェコ語):\\n{}\".format(translate_cs))\nprint(\"------------------------------------\")\n\n#############################################\n\n# 日本語からロシア語へ翻訳\ntranslate_ru = translate.translate_text(\n Text=input_text,\n SourceLanguageCode=\"ja\",\n TargetLanguageCode=\"ru\"\n)[\"TranslatedText\"].encode(\"UTF-8\")\n# 結果を表示\nprint(\"○ 翻訳後(ロシア語):\\n{}\".format(translate_ru))\nprint(\"------------------------------------\")\n" }, { "alpha_fraction": 0.7841529846191406, "alphanum_fraction": 0.811475396156311, "avg_line_length": 13.680000305175781, "blob_id": "a1cd7cf204a7f03557c5b79f77676b1a0511dde7", "content_id": "755cf1dad18bfa476ba3d331620a41b88b663829", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 672, "license_type": "permissive", "max_line_length": 44, "num_lines": 25, "path": "/aws/README.md", "repo_name": "HumanAcademy-AI-Cource/16Sample", "src_encoding": "UTF-8", "text": "# 16Sample\n\n## AWSを使った翻訳プログラム\n\n### 準備\nAWSと接続するための設定を事前に済ませる\n\n### 実行\n```sh\n# プログラム内に書かれている文書を翻訳\npython translate1.py\n\n# translate2.txtに書かれている英語の文章を日本語に翻訳\npython translate2.py\n\n# 英語の文章を入力し日本語に翻訳\npython translate3.py\n\n# translate4.txtに書かれている日本語の文章を様々な言語に翻訳\npython translate4.py\n\n# translate5.txtに書かれている日本語の文章を連鎖的に様々な言語へ翻訳し、\n# 途中経過として日本語への再翻訳を行う\npython translate5.py\n```" }, { "alpha_fraction": 0.587837815284729, "alphanum_fraction": 0.5962837934494019, "avg_line_length": 22.68000030517578, "blob_id": "3832e4826d334e5a5b4f76c163392bd84dff53c7", "content_id": "5aea56d0df8a8894d153d46440514ed84a69f8fa", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 732, "license_type": "permissive", "max_line_length": 50, "num_lines": 25, "path": "/aws/translate2.py", "repo_name": "HumanAcademy-AI-Cource/16Sample", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# ライブラリのインポート\nimport boto3\n\n\n# テキストファイルから文章を読み出す\nwith open(\"./translate2.txt\") as f:\n input_text = f.read()\n\n# AWSを使った翻訳の準備\ntranslate = boto3.client(service_name=\"translate\")\n# テキストファイルから読み込んだ文章を翻訳\ntranslate_text = translate.translate_text(\n Text=input_text,\n SourceLanguageCode=\"en\",\n TargetLanguageCode=\"ja\"\n)[\"TranslatedText\"].encode(\"UTF-8\")\n\n# 結果を表示\nprint(\"------------------------------------\")\nprint(\"○ 翻訳前: \\n{}\".format(input_text))\nprint(\"------------------------------------\")\nprint(\"○ 翻訳後: \\n{}\".format(translate_text))\n" } ]
6
MatthewVaccaro/Coding-Challenges
https://github.com/MatthewVaccaro/Coding-Challenges
31a5144f86ce154250409c2abcf2025e1dc35bd1
200828c276d27335df3665b947fa6ca58d791924
616ce60f0ad508fcc7ae15288487916ead978312
refs/heads/master
2023-02-13T05:14:19.207686
2021-01-10T20:17:06
2021-01-10T20:17:06
294,792,111
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.49713632464408875, "alphanum_fraction": 0.5154638886451721, "avg_line_length": 25.454545974731445, "blob_id": "0d811177f88d82a3be2c13db54d27903c61bd99a", "content_id": "24ebce2d64389bdadb9c5fe707c356751e74b7d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 873, "license_type": "no_license", "max_line_length": 66, "num_lines": 33, "path": "/10-25-20/consecutiveItems.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "# You are given a list of unique integers arr, and two integers a\n# and b. Your task is to find out whether or not a and b appear\n# consecutively in arr, and return a boolean value(True if a and b\n# are consecutive, False otherwise). It is guaranteed that a and b\n# are both present in arr.\n\n# def consecutive(arr, a, b):\n# result = False\n# for n in range(len(arr) - 1):\n# if arr[n] == a:\n# if arr[n + 1] == b:\n# result = True\n# break\n \n# if arr[n] == b:\n# if arr[n + 1] == a:\n# result = True\n# break\n# return result\n\n\n# print(consecutive([1, 3, 7, 5], 3, 7))\n\ndef consecutive(arr, a, b):\n A = arr.index(a)\n B = arr.index(b)\n if A - B == 1:\n return True\n else:\n return False\n\n\nprint(consecutive([1, 3, 7, 5], 3, 7))\n" }, { "alpha_fraction": 0.5771812200546265, "alphanum_fraction": 0.6577181220054626, "avg_line_length": 23.83333396911621, "blob_id": "b277f9a3c84f2b343c5717e9665dc25c46f126a2", "content_id": "4cc44e554f50745d673c93bc041e87474648252f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 447, "license_type": "no_license", "max_line_length": 116, "num_lines": 18, "path": "/10-31-20/squareSum.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "# https: // www.codewars.com/kata/515e271a311df0350d00000f/train/python\n# Complete the square sum function so that it squares each number passed into it and then sums the results together.\n\n# For example, for [1, 2, 2] it should return 9 because 1 ^ 2 + 2 ^ 2 + 2 ^ 2 = 9.\n\ndef square_sum(numbers):\n numArray = []\n for num in numbers:\n numArray.append(num ** 2)\n\n return sum(numArray)\n \n\n\nprint(square_sum([0, 3, 4, 5]))\n\n\n# 50\n" }, { "alpha_fraction": 0.6423611044883728, "alphanum_fraction": 0.6423611044883728, "avg_line_length": 27.799999237060547, "blob_id": "85b881b0d61cb680a08886677acf4a955c702f4b", "content_id": "d3bb5d028525b072eabc9f58459a5478e2d476ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 288, "license_type": "no_license", "max_line_length": 87, "num_lines": 10, "path": "/11-21-20/findSquare.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "def remove_duplicate_words(s):\n result = []\n for word in s.split(\" \"):\n if word not in result:\n result.append(word)\n\n return \" \".join(result)\n\nprint(remove_duplicate_words(\n 'alpha beta beta gamma gamma gamma delta alpha beta beta gamma gamma gamma delta'))\n" }, { "alpha_fraction": 0.40392157435417175, "alphanum_fraction": 0.45098039507865906, "avg_line_length": 22.090909957885742, "blob_id": "954055f9af79777ca8d9c3dc114f577ed667fc8c", "content_id": "bd324539f9b349ca5de5637abdc6c19378949fb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 255, "license_type": "no_license", "max_line_length": 47, "num_lines": 11, "path": "/1-10-21/sortTheOdd.py", "repo_name": 
"MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "def sort_array(arr):\n odd = [ num for num in arr if num % 2 != 0]\n odd.sort()\n for i in range(len(arr)):\n if arr[i] % 2 != 0:\n arr[i] = odd[0]\n odd.pop(0)\n\n return arr\n \nprint(sort_array([5, 3, 2, 8, 1, 4]))\n\n" }, { "alpha_fraction": 0.5059760808944702, "alphanum_fraction": 0.541832685470581, "avg_line_length": 16.928571701049805, "blob_id": "3371ae7fa4461def9176878228214f8a1b1f0984", "content_id": "7eb3361c3fa3da91754e0619cba83fa6d56de3a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 251, "license_type": "no_license", "max_line_length": 40, "num_lines": 14, "path": "/1-10-21/delete_nth.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "def delete_nth(order, max_e):\n result = []\n\n for i in range(len(order)):\n counted = result.count(order[i])\n if counted != max_e:\n result.append(order[i])\n return result\n\n\n \n\n\nprint(delete_nth([20, 37, 20, 21], 1))\n" }, { "alpha_fraction": 0.51902174949646, "alphanum_fraction": 0.59375, "avg_line_length": 34.90243911743164, "blob_id": "2c036fb5682e75481d03aa40aaf6fb4df7c19d7d", "content_id": "02a7adebcc4c89e42d8d8f2829dd871bf652c88d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1472, "license_type": "no_license", "max_line_length": 120, "num_lines": 41, "path": "/10-19-20/sumTwoArray.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "# Your task is to create a function called sum_arrays() in Python or addArrays in Javascript, which takes two arrays\n# consisting of integers, and returns the sum of those two arrays.\n\n# The twist is that(for example)[3, 2, 9] does not equal 3 + 2 + 9, it would equal '3' + '2' + '9' converted to an\n# integer for this kata, meaning it would equal 329. The output should be an array of the the sum in a similar\n# fashion to the input(for example, if the sum is 341, you would return [3, 4, 1]). Examples are given below of what\n# two arrays should return.\n\n# [3, 2, 9], [1, 2] - -> [3, 4, 1]\n# [4, 7, 3], [1, 2, 3] - -> [5, 9, 6]\n# [1], [5, 7, 6] - -> [5, 7, 7]\n# If both arrays are empty, return an empty array.\n\n# In some cases, there will be an array containing a negative number as the first index in the array. In this case treat\n# the whole number as a negative number. 
See below:\n\n# [3, 2, 6, 6], [-7, 2, 2, 8] - -> [-3, 9, 6, 2] # 3266 + (-7228) = -3962\n\ndef sum_arrays(array1, array2):\n\n if len(array1) == 0 and len(array2) == 0:\n return []\n\n if len(array1) > 0:\n array1 = [ str(num) for num in array1]\n array1 = int(\"\".join(array1))\n else:\n array1 = 0\n if len(array2) > 0:\n array2 = [str(num) for num in array2]\n array2 = int(\"\".join(array2))\n else:\n array2 = 0\n \n\n return array1 + array2\n \n\nprint(sum_arrays([3, 2, 6, 6], [-7, 2, 2, 8]))\n\n# Edge Case [3,2,6,6],[-7,2,2,8] --> [-3,9,6,2]\n" }, { "alpha_fraction": 0.5299227833747864, "alphanum_fraction": 0.5627413392066956, "avg_line_length": 23.0930233001709, "blob_id": "edb1ecd11ef1dea8976ec48dc4e575d5d9f5a7b6", "content_id": "dddf9d4cebc43154b8fbff5a462179dfbc8ad554", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1036, "license_type": "no_license", "max_line_length": 156, "num_lines": 43, "path": "/maxMin.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "# def solve(arr):\n# arr.sort()\n# result = []\n\n# turn = True\n# for i in range(len(arr)):\n# if turn == True:\n# result.append(arr[-1])\n# arr.pop(-1)\n# else:\n# result.append(arr[0])\n# arr.pop(0)\n# turn = not turn\n\n# return result\n\n\n\n\n\n\n# print(solve([52, 77, 72, 44, 74, 76, 40]))\n# # [77,40,76,44,74,52,72\n\n\ndef meeting(s):\n newString = s.upper()\n newArray = newString.split(\";\")\n result = []\n for name in newArray:\n fullname = name.split(\":\")\n result.append(\"(\" + fullname[1]+\",\" + \" \" + fullname[0] + \")\")\n sortedResult = sorted(result)\n\n return \" \".join(sortedResult)\n\n\n # sortedArr = sorted(newArray)\n\n\nprint(meeting(\"Alexis:Wahl;John:Bell;Victoria:Schwarz;Abba:Dorny;Grace:Meta;Ann:Arno;Madison:STAN;Alex:Cornwell;Lewis:Kern;Megan:Stan;Alex:Korn\"))\n\n# \"(ARNO, ANN)(BELL, JOHN)(CORNWELL, ALEX)(DORNY, ABBA)(KERN, LEWIS)(KORN, ALEX)(META, GRACE)(SCHWARZ, VICTORIA)(STAN, MADISON)(STAN, MEGAN)(WAHL, ALEXIS)\")\n" }, { "alpha_fraction": 0.6729788780212402, "alphanum_fraction": 0.6766205430030823, "avg_line_length": 39.382354736328125, "blob_id": "e78059b0ca4748b58a4d72f782bb927ed021d86b", "content_id": "1ed71d9e5cf881d3688ca781fa9c946a1d5e9a74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1381, "license_type": "no_license", "max_line_length": 232, "num_lines": 34, "path": "/10-31-20/stringTask.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "#  Petya started to attend programming lessons. On the first lesson his task was to write a simple program. The program was supposed to do the following: in the given string, consisting of uppercase and lowercase Latin letters, it:\n\n# deletes all the vowels,\n# inserts a character \".\" before each consonant,\n# replaces all uppercase consonants with corresponding lowercase ones.\n#  Vowels are letters \"A\", \"O\", \"Y\", \"E\", \"U\", \"I\", and the rest are consonants. The program's input is exactly one string, it should return the output as a single string, resulting after the program's processing the initial string.\n\n# Input:\n#  The first argument represents input string of Petya's program. 
This string only consists of uppercase and lowercase Latin letters.\n\n# Output:\n#  Return the resulting string.\n\n# Examples:\n\n# ('tour') = > '.t.r'\n# ('Codewars') = > '.c.d.w.r.s'\n# ('aBAcAba') = > '.b.c.b'\n\ndef string_task(s):\n vols = [\"a\", \"o\", \"y\", \"e\", \"u\", \"i\"]\n lowerLetters = s.lower()\n removeVols = [letter for letter in lowerLetters if letter not in vols]\n for i in range(len(removeVols)* 2 - 1):\n if removeVols[i] != '.':\n removeVols.insert(i + 1, '.')\n removeVols.insert(0, '.')\n removeVols.pop(-1)\n return \"\".join(removeVols)\n\n\nprint(string_task(\"Codewars\"))\n\n#! I'm not proud of this solution but the group wanted to move on so...\n" }, { "alpha_fraction": 0.6390423774719238, "alphanum_fraction": 0.6445671916007996, "avg_line_length": 26.149999618530273, "blob_id": "b3ad81717bbfca31ef0d1f9b73f5c66152913cf6", "content_id": "b5059452977770db92dd82f60ee043df97ebb24b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 543, "license_type": "no_license", "max_line_length": 90, "num_lines": 20, "path": "/1-10-21/uniqueInOrder.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "# Implement the function unique_in_order which takes as argument a sequence and returns a\n# list of items without any elements with the same value next to each other and preserving\n# the original order of elements.\n\ndef unique_in_order(iterable):\n result = []\n\n if len(iterable) > 0:\n result.append(iterable[0])\n\n for i in range(len(iterable)):\n if iterable[i] is not result[-1]:\n result.append(iterable[i])\n\n return result\n \n else:\n return result\n\nprint(unique_in_order('A'))\n" }, { "alpha_fraction": 0.8177340030670166, "alphanum_fraction": 0.8177340030670166, "avg_line_length": 100.5, "blob_id": "fb9a9c25713256f968db10d29c35b9f48ce94315", "content_id": "8a273f09f364d7b117743541b2363ceeae9c8406", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 203, "license_type": "no_license", "max_line_length": 182, "num_lines": 2, "path": "/README.md", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "# Coding-Challenges\nEvery Saturday and Sunday a few developers and I partake in coding challenges from Codewars.com - This repository will be used to track the challenges I've completed/failed overtime.\n" }, { "alpha_fraction": 0.38997820019721985, "alphanum_fraction": 0.4161219894886017, "avg_line_length": 20.85714340209961, "blob_id": "3cfc5661de5e9ddd6c835f5899a14cc6ecd354c4", "content_id": "de09e5e3ac7668bcd2f8c4437fb867f74365f80c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 459, "license_type": "no_license", "max_line_length": 62, "num_lines": 21, "path": "/11-22-20/buildTower.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "def tower_builder(n):\n star = \"*\"\n space = \" \"\n tower = []\n ends = []\n\n for i in range(1, n+1):\n if len(tower) == 0:\n value = (space * (n//2)) + star + (space * (n//2))\n tower.append(value)\n ends = [(n//2)-1, (n//2)+1]\n tower.append(space * (n+1) )\n for el in ends:\n tower[i][el] = star\n # ends[el] = ends[el] - 1\n\n return tower\n\n \n\nprint(tower_builder(7))\n" }, { "alpha_fraction": 0.6497859954833984, "alphanum_fraction": 0.6847360730171204, "avg_line_length": 36.83783721923828, "blob_id": "732a233fe0e141fa4114e06814b682b9b13ffa49", "content_id": 
"ebfdbf288bf5eee535c8430c27e2b0e2852d9f2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1402, "license_type": "no_license", "max_line_length": 160, "num_lines": 37, "path": "/10-24-20/EnglishBeggars.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "# https://www.codewars.com/kata/59590976838112bfea0000fa/train/python\n# Born a misinterpretation of this kata, your task here is pretty simple: given an array of values and an amount of beggars, you are supposed to return an array\n# with the sum of what each beggar brings home, assuming they all take regular turns, from the first to the last.\n\n# For example: [1, 2, 3, 4, 5] for 2 beggars will return a result of[9, 6], as the first one takes[1, 3, 5], the\n# second collects[2, 4].\n\n# The same array with 3 beggars would have in turn have produced a better out come for the second beggar: [5, 7, 3],\n# as they will respectively take[1, 4], [2, 5] and [3].\n\n# Also note that not all beggars have to take the same amount of \"offers\", meaning that the length of the array is\n# not necessarily a multiple of n; length can be even shorter, in which case the last beggars will of course take\n# nothing(0).\n\n# Note: in case you don't get why this kata is about English beggars, then you are not familiar on how\n# religiously queues are taken in the kingdom;)\n\ndef beggars(values, n):\n result = []\n beggarList = []\n count = 0\n while count < n:\n for beggar in range(count, len(values), n ):\n beggarList.append(values[beggar])\n result.append(sum(beggarList))\n beggarList = []\n count += 1\n\n return result\n\n\n\n\n \n\n\nprint(beggars([1, 2, 3, 4, 5], 2))\n\n\n" }, { "alpha_fraction": 0.5951859951019287, "alphanum_fraction": 0.6652078628540039, "avg_line_length": 27.5625, "blob_id": "6b6d6083f615185c5d6507820a2d64173470797c", "content_id": "3081346cd41c649d3c62bd0534282da3a4e246f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 457, "license_type": "no_license", "max_line_length": 88, "num_lines": 16, "path": "/9-26-20/fillingAnArray.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "# https: // www.codewars.com/kata/571d42206414b103dc0006a1/train/python\n# We want an array, but not just any old array, an array with contents!\n# Write a function that produces an array with the numbers 0 to N-1 in it.\n# For example, the following code will result in an array containing the numbers 0 to 4:\n# arr(5) // = > [0, 1, 2, 3, 4]\n\n# KYU 8\n\ndef arr(n=0):\n arr = []\n for num in range(n):\n arr.append(num)\n return arr\n\n\nprint(arr(0))\n" }, { "alpha_fraction": 0.5649202466011047, "alphanum_fraction": 0.5956720113754272, "avg_line_length": 38.90909194946289, "blob_id": "97c50b192e86fc0bd9e699ca99df2fa267b31b80", "content_id": "bde16a1cdf01127d1d8090c3e560d6cf54d432c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 878, "license_type": "no_license", "max_line_length": 153, "num_lines": 22, "path": "/10-10-20/matrixAdditional.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "# https: // www.codewars.com/kata/526233aefd4764272800036f/train/python\n# Write a function that accepts two square matrices(N x N two dimensional arrays), and return the sum of the two. 
Both matrices being passed into the\n# function will be of size N x N(square), containing only integers.\n# How to sum two matrices:\n# Take each cell[n][m] from the first matrix, and add it with the same[n][m] cell from the second matrix. This will be cell[n][m] of the solution matrix.\n\ndef matrix_addition(a, b):\n matrix = []\n innerList = []\n\n for row in range(len(a)):\n for col in range(len(b)):\n innerList.append(a[row][col] + b[row][col])\n matrix.append(innerList)\n innerList = []\n return matrix\n \n \nprint(matrix_addition([[1, 2],\n [1, 2] ],\n [[2, 3],\n [2, 3]]))\n" }, { "alpha_fraction": 0.5334261655807495, "alphanum_fraction": 0.6183844208717346, "avg_line_length": 24.64285659790039, "blob_id": "371b05344113996024642b4d8040f5a350498635", "content_id": "1966fa85fd9b704534cb7c92bc1d8b0e099422ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 718, "license_type": "no_license", "max_line_length": 157, "num_lines": 28, "path": "/10-31-20/sumOfParts.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "# https: // www.codewars.com/kata/5ce399e0047a45001c853c2b/train/python\n# Let us consider this example(array written in general format):\n\n# ls = [0, 1, 3, 6, 10]\n\n# Its following parts:\n\n# ls = [0, 1, 3, 6, 10]\n# ls = [1, 3, 6, 10]\n# ls = [3, 6, 10]\n# ls = [6, 10]\n# ls = [10]\n# ls = []\n# The corresponding sums are(put together in a list): [20, 20, 19, 16, 10, 0]\n\n# The function parts_sums ( or its variants in other languages) will take as parameter a list ls and return a list of the sums of its parts as defined above.\n\n\ndef parts_sums(ls, cache={}):\n result = []\n for i in range(len(ls)):\n result.append(sum(ls[i:]))\n result.append(0)\n \n return result\n\n\nprint(parts_sums([0, 1, 3, 6, 10]))\n" }, { "alpha_fraction": 0.45571956038475037, "alphanum_fraction": 0.46494466066360474, "avg_line_length": 20.176469802856445, "blob_id": "04aadb64d9656e86457036e1abb4bbe97b47d508", "content_id": "ad49d6f6e45210b0838a0df9019a20b9d77497cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1084, "license_type": "no_license", "max_line_length": 54, "num_lines": 51, "path": "/leet-11-06-20/twoSums.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "# def twoSum(nums, target):\n# for i in range(len(nums)):\n# if target - nums[i] in nums:\n# findIndex = nums.index(target - nums[i])\n# if findIndex != i:\n# return [i,findIndex ]\n \n# # return findIndex\n\n\n# print(twoSum([3, 3]\n# ,6))\n\n\n# def myAtoi(s):\n# newNum = []\n# splitting = [char for char in s]\n# for el in splitting:\n# if el.isnumeric() == True or el == '-':\n# newNum.append(el)\n# backToString = \"\".join(newNum)\n# return int(backToString)\n\n\n# print(type (myAtoi(\n# \" -42\")))\n\ndef lengthOfLongestSubstring(s):\n window = 0\n queue = []\n max = 0\n\n if len(s) == 1:\n return 1\n\n\n while window < len(s):\n for i in range(window, len(s)):\n if s[i] not in queue:\n queue.append(s[i])\n else:\n if max < len(queue):\n max = len(queue)\n queue = []\n break\n window += 1\n \n return max\n\n\nprint(lengthOfLongestSubstring(\"jbpnbwwd\"))\n \n" }, { "alpha_fraction": 0.5247524976730347, "alphanum_fraction": 0.6237623691558838, "avg_line_length": 19.200000762939453, "blob_id": "1dbdff216a50c4c9435e2058fb8bc2cadd3104e6", "content_id": "c461a46b2c2a8eac2f0164f84fb5e7a75f429c16", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 101, "license_type": "no_license", "max_line_length": 50, "num_lines": 5, "path": "/11-21-20/smallestSum.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "def find_smallest_int(arr):\n return min(arr)\n\n\nprint(find_smallest_int([78, 56, -2, 12, 8, -33]))\n" }, { "alpha_fraction": 0.5413793325424194, "alphanum_fraction": 0.5701149702072144, "avg_line_length": 30.071428298950195, "blob_id": "f3b386d79328f96a52c7e0e4a1ea0dbe835d1d67", "content_id": "1ae68281fa5382272524a79caa7dd4cdd9661d67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 870, "license_type": "no_license", "max_line_length": 93, "num_lines": 28, "path": "/10-10-20/longest.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "# https: // www.codewars.com/kata/59c5f4e9d751df43cf000035/train/python\n# The vowel substrings in the word codewarriors are o, e, a, i, o. The longest of these has a\n# length of 2. Given a lowercase string that has alphabetic characters only(both vowels and\n# consonants) and no spaces, return the length of the longest vowel substring. Vowels are\n# any of aeiou.\n\ndef solve(s):\n vols = ['a', 'e', 'i', 'o', 'u']\n currentCount = 0\n bestCount = 0\n for i in range(len(s)):\n if i + 1 < len(s):\n if s[i] in vols and s[i + 1] in vols:\n currentCount += 1\n else:\n if currentCount > bestCount:\n bestCount = currentCount\n currentCount = 0\n else:\n currentCount = 0\n \n return bestCount + 1\n\n\n\n\n\nprint(solve('codewarriors'))\n" }, { "alpha_fraction": 0.49340659379959106, "alphanum_fraction": 0.5197802186012268, "avg_line_length": 27.4375, "blob_id": "117d21002b423006b661b5746fffef405cd736f2", "content_id": "8c0a1a153541569953d2f98cfa8c74c1cdaa0a6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 910, "license_type": "no_license", "max_line_length": 106, "num_lines": 32, "path": "/11-5-20/twoPlayerGame.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "# given an array of numbers thats length is always more than 1 have two user play a game\n# Each user will take the a number off the array from the store going back and forth\n# If the there is a duplicate number then the user will continue to pull until ther are no more duplicates\n# Return the player with the most pulled from the array\n\ndef arrThing(arr):\n turn = 'p1'\n p1 = []\n p2 = []\n\n for i in range(len(arr)):\n if turn == 'p1':\n p1.append(arr[i])\n if i == len(arr) - 1:\n break\n if arr[i] != arr[i + 1]:\n turn = 'p2'\n else:\n p2.append(arr[i])\n if i == len(arr) - 1:\n break\n if arr[i] != arr[i + 1]:\n turn = 'p1'\n\n if len(p1) > len(p2):\n return 'Player 1 Won'\n else:\n return 'player 2 Won'\n \n\n\nprint(arrThing([1,3,3,4,4,4,5]))\n" }, { "alpha_fraction": 0.5752508640289307, "alphanum_fraction": 0.585841715335846, "avg_line_length": 25, "blob_id": "593cb22be7c9fb2f673ab3592812e0b7d148f4d6", "content_id": "e3b1c62a1ad8ecef44dbe05a6d46cbc78f67b0a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1794, "license_type": "no_license", "max_line_length": 98, "num_lines": 69, "path": "/10-18-20/leaderboards-failed.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "\n# n this kata you will be given a leaderboard of unique names for example:\n\n# ['John',\n# 
'Brian',\n# 'Jim',\n# 'Dave',\n# 'Fred']\n# Then you will be given a list of strings for example:\n\n# ['Dave +1', 'Fred +4', 'Brian -1']\n# Then you sort the leaderboard.\n\n# The steps for our example would be:\n\n# # Dave up 1\n# ['John',\n# 'Brian',\n# 'Dave',\n# 'Jim',\n# 'Fred']\n# # Fred up 4\n# ['Fred',\n# 'John',\n# 'Brian',\n# 'Dave',\n# 'Jim']\n# # Brian down 1\n# ['Fred',\n# 'John',\n# 'Dave',\n# 'Brian',\n# 'Jim']\n# Then once you have done this you need to return the leaderboard.\n\n# All inputs will be valid. All strings in the second list will never ask to move a name up\n# higher or lower than possible eg. \"John +3\" could not be added to the end of the second input\n# list in the example above.\n\n# The strings in the second list will always be something in the leaderboard followed by a space\n# and a + or - sign followed by a number.\n\n\ndef leaderboard_sort(leaderboard, changes):\n for i, action in enumerate(changes):\n currentIndex = i\n nameAction = action.split(\" \")\n name = nameAction[0]\n action = nameAction[1][0]\n amount = int(nameAction[1][1])\n # print(action, amount)\n leaderboard.remove(name)\n if action == '+':\n leaderboard.insert(currentIndex - amount - 1, name)\n else: \n leaderboard.insert(currentIndex + amount, name)\n\n return leaderboard\n\n \n\n\nprint(leaderboard_sort(['John', 'Brian', 'Jim', 'Dave', 'Fred'], [\n 'Dave +1', 'Fred +4', 'Brian -1']))\n\n# test.assert_equals(leaderboard_sort(['John', 'Brian', 'Jim', 'Dave', 'Fred'], [\n# 'Dave +1', 'Fred +4', 'Brian -1']), ['Fred', 'John', 'Dave', 'Brian', 'Jim'])\n\n\n# Couldnt figure it out! :(" }, { "alpha_fraction": 0.5190133452415466, "alphanum_fraction": 0.5611510872840881, "avg_line_length": 29.40625, "blob_id": "806834518a88fc938d9ffd03ebfff1bf67b399ef", "content_id": "dfcf9af04163fb9513a5cc839b853fb3a13e250e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 973, "license_type": "no_license", "max_line_length": 94, "num_lines": 32, "path": "/9-26-20/gridIndex.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "# https: // www.codewars.com/kata/5f5802bf4c2cc4001a6f859e/train/python\n# You are given an n by n(square) grid of characters, for example:\n# [['m', 'y', 'e'],\n# ['x', 'a', 'm'],\n# ['p', 'l', 'e']]\n# You are also given a list of integers as input, for example:\n# [1, 3, 5, 8]\n# You have to find the characters in these indexes of the grid if you think of the indexes as:\n# [[1, 2, 3],\n# [4, 5, 6],\n# [7, 8, 9]]\n# Remember that the indexes start from one and not zero.\n# Then you output a string like this:\n# \"meal\"\n# All inputs will be valid.\n\n\ndef grid_index(grid, indexes):\n newList = []\n word = []\n for indexA in range(len(grid)):\n for indexB in range(len(grid)):\n newList.append(grid[indexA][indexB])\n\n for num in indexes:\n word.append(newList[num - 1])\n\n return \"\".join(word)\n\n\nprint(grid_index([['h', 'e', 'l', 'l'], ['o', 'c', 'o', 'd'], [\n 'e', 'w', 'a', 'r'], ['r', 'i', 'o', 'r']], [5, 7, 9, 3, 6, 6, 8, 8, 16, 13]))\n" }, { "alpha_fraction": 0.6106259226799011, "alphanum_fraction": 0.6237263679504395, "avg_line_length": 53.939998626708984, "blob_id": "2857e11fa4a7fba382d77e3be3d7a4c5228ffff5", "content_id": "e43a9637386eff3cb748e92c7fc784f258026e95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2748, "license_type": "no_license", "max_line_length": 247, "num_lines": 50, "path": "/code.md", 
"repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "### Reading Through the problem:\nKey things to understand when readng through a problem\n- What data type am my being given per arrgument? (string, int, list, set, dict...)\n- What am I garenteed with that arrgument? (len, negitive, big, small)\n- What is the required output data type (string, int, list, set, dict...)\n\nWhen you don't understand what the problem is asking for:\n- Look at multiple tests and try to compare the input to the output.\n- Re-read the question, slowly, and always out loud\n\n# Every questions is minpulating data - this can mean mutating, creating, sorting, adding, subtracting and so on\n\n### Once you understand the problem\n### Javascript\nIf you need to touch every item in a list once consider: \n- a Map / Filter / For Loop:\nMap: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/filter\n^^^ Great if you need to create a new array while iterating over an array\nFilter: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Map\n^^^ Great for comparing an item to each item being looped over\nFor Loop: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for\n^^^ Great for when you're using an index - REMEMEBER a For loop is just incrementing or decromenting a number. Its not physical going through the items\n\nIf you need to change data time:\n\n\n## String to Other\nString -> Int\nparseInt(stringValue)\nparseInt: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/parseInt\n^^^ note this does not need dot notation\n\n----------------------------------------------------------------------------------------------------------------------------------------------------------------\n\nString -> Array:\nstring.split(' ')\nsplit: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/split\n^^^ add .split() to any string value and in the parentheses add a value to split on. If you want each word make sure your quotes have a space. If you want each letter remove the space. If you want to split on a symbal add the symble in the quotes.\nconst url = 'https://twitter.com/messages/19047593-3061847840\nExample: url.split('/') \nresult: ['https:', 'twitter.com' , 'messages' , '19047593-3061847840']\n\n----------------------------------------------------------------------------------------------------------------------------------------------------------------\n\nString -> Object:\nThere is no method that will do this. You would need to iterate of the string value one at time and create the object by hand\n\n----------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n## Int to others\n\n" }, { "alpha_fraction": 0.5036284327507019, "alphanum_fraction": 0.5631349682807922, "avg_line_length": 28.95652198791504, "blob_id": "bceab450e62cbaad04acc95845638f2261df5fa8", "content_id": "56f71d0b28d160eb903b4df1fba3f6df59217f52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 689, "license_type": "no_license", "max_line_length": 84, "num_lines": 23, "path": "/10-10-20/invertValues.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "# https: // www.codewars.com/kata/5899dc03bc95b1bf1b0000ad\n# Given a set of numbers, return the additive inverse of each. 
Each positive becomes\n# negatives, and the negatives become positives.\n# invert([1, 2, 3, 4, 5]) == [-1, -2, -3, -4, -5]\n# invert([1, -2, 3, -4, 5]) == [-1, 2, -3, 4, -5]\n# invert([]) == []\n# You can assume that all values are integers. Do not mutate the input array/list.\n\ndef invert(arr):\n results = []\n if len(arr) == 0:\n return results\n else:\n for num in arr:\n if num < 0:\n results.append(abs(num))\n else:\n results.append(-abs(num))\n\n return results\n\n\nprint(invert([1, -2, 3, -4, 5]))\n" }, { "alpha_fraction": 0.626057505607605, "alphanum_fraction": 0.6582064032554626, "avg_line_length": 28.549999237060547, "blob_id": "bf3b26afe6dc78e22233863a72ed8303d0e8d6af", "content_id": "75f791de9940d2ede1952dd9f897a76b7d3bc6e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 591, "license_type": "no_license", "max_line_length": 78, "num_lines": 20, "path": "/10-24-20/stonesOnTheTable.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "# /https: // www.codewars.com/kata/5f70e4cce10f9e0001c8995a\n# There are some stones on Bob's table in a row, and each of them can be red,\n# green or blue, indicated by the characters R, G, and B.\n\n# Help Bob find the minimum number of stones he needs to remove from the table\n# so that the stones in each pair of adjacent stones have different colours.\n\ndef solution(stones):\n count = 0\n length = len(stones) - 1\n for n in range(length):\n if length > n:\n if stones[n] == stones[n + 1]:\n count += 1\n\n return count\n\n\n\nprint(solution(\"RGBRGBRGGB\"))\n" }, { "alpha_fraction": 0.6348921060562134, "alphanum_fraction": 0.6753597259521484, "avg_line_length": 40.185184478759766, "blob_id": "ae96fafcd774e44868d9f7d349d61f0522b27ca8", "content_id": "f4f8f85793de38886484e0c7ac60d9284d1ed2d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1112, "license_type": "no_license", "max_line_length": 198, "num_lines": 27, "path": "/9-26-20/casinoChips.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "# https: // www.codewars.com/kata/5e0b72d2d772160011133654/train/python\n# You are given three piles of casino chips: white, green and black chips:\n\n# the first pile contains only white chips\n# the second pile contains only green chips\n# the third pile contains only black chips\n# Each day you take exactly two chips of different colors and head to the casino. You can choose any color, but you are not allowed to take two chips of the same color in a day.\n\n# You will be given an array representing the number of chips of each color and your task is to return the maximum number of days you can pick the chips. 
Each day you need to take exactly two chips.\n\n# solve([1, 1, 1]) = 1, because after you pick on day one, there will be only one chip left\n# solve([1, 2, 1]=2, you can pick twice; you pick two chips on day one then on day two\n# solve([4, 1, 1])=2\n\ndef solve(arr, count=0):\n arr.sort()\n print(\"--->\", arr)\n if arr[0] == 0 and arr[1] == 0:\n return count\n else:\n count += 1\n arr[1] -= 1\n arr[2] -= 1\n return solve(arr, count)\n\n\nprint(solve([8, 2, 8]))\n" }, { "alpha_fraction": 0.580152690410614, "alphanum_fraction": 0.5954198241233826, "avg_line_length": 20.83333396911621, "blob_id": "3586f0bc5604dd6f5a043fb75faeadd64c50c1b4", "content_id": "f8b59f8a3f6bc1cbfad7c3aac5aefce22ff0e954", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 262, "license_type": "no_license", "max_line_length": 41, "num_lines": 12, "path": "/11-22-20/formTheLargest.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "def max_number(n):\n result = []\n numArr = [int(num) for num in str(n)]\n print(numArr)\n for i in range(len(numArr)):\n result.append(str(max(numArr)))\n numArr.remove(max(numArr))\n\n\n return int(\"\".join(result))\n\nprint(max_number(7389))\n" }, { "alpha_fraction": 0.6899224519729614, "alphanum_fraction": 0.7558139562606812, "avg_line_length": 27.66666603088379, "blob_id": "0c288ff924035a8d406a7abca50719e16b39c552", "content_id": "77abc1584a0727fc4b48ce72c32f9ff1acb05d3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 258, "license_type": "no_license", "max_line_length": 69, "num_lines": 9, "path": "/10-24-20/stringRepeate.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "# https://www.codewars.com/kata/57a0e5c372292dd76d000d7e/train/python\n# Write a function called repeat_str which repeats\n# the given string src exactly count times.\n\ndef repeat_str(repeat, string):\n return string * repeat\n\n\nprint(repeat_str(3, 'hello '))\n" }, { "alpha_fraction": 0.7363013625144958, "alphanum_fraction": 0.7454338073730469, "avg_line_length": 53.75, "blob_id": "d70077744e356320231fd9435564e6bbc52519c3", "content_id": "c47f49cb3c1e4940a1076b5c680454a3caf4915a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 876, "license_type": "no_license", "max_line_length": 144, "num_lines": 16, "path": "/10-25-20/manhattanTraversal.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "# The distance formula can be used to find the distance between two points. What if we were trying to walk from point A to point B,\n# but there were buildings in the way? We would need some other formula..but which?\n# Manhattan Distance\n# Manhattan distance is the distance between two points in a grid(like the grid-like street geography of the New York borough of Manhattan)\n# calculated by only taking a vertical and / or horizontal path.\n# Complete the function that accepts two points and returns the Manhattan Distance between the two points.\n# The points are arrays or tuples containing the x and y coordinate in the grid. 
You can think of x as the row in the grid, and y as the column.\n\ndef manhattan_distance(pointA, pointB):\n row = abs(pointA[0] - pointB[0]) \n col = abs(pointA[1] - pointB[1])\n\n return row + col\n\n\nprint(manhattan_distance([5, 4], [3, 2]))\n" }, { "alpha_fraction": 0.5720994472503662, "alphanum_fraction": 0.6116021871566772, "avg_line_length": 27.28125, "blob_id": "72f3704f5c013c0b3f19b5fee2d10b7ce2feabfc", "content_id": "8f62f7120104825cba9b3d4f750a90b4b065a1b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3631, "license_type": "no_license", "max_line_length": 169, "num_lines": 128, "path": "/9-5-20/codewars.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "# 8 KYU\n# It's pretty straightforward. Your goal is to create a function that removes the first and last characters of a string. You're given one parameter, the original string.\n# You don't have to worry with strings with less than two characters.\n\ndef remove_char(s):\n letterList = [letter for letter in s]\n letterList.pop(0)\n letterList.pop(len(letterList)-1)\n\n return \"\".join(letterList)\n\n\n# 7 KYU\n# Complete the function to convert an integer into a string of the Turkish name.\n\n# input will always be an integer 0-99\n# output should always be lower case.\n\n# Forming the Turkish names for the numbers 0-99 is very straightforward:\n\n# units(0-9) and tens(10, 20, 30, etc.) each have their own unique name\n# all other numbers are simply[tens] + [unit], like twenty one in English.\n# Unlike English, Turkish does not have \"teen\"-suffixed numbers\n# e.g. 13 would be directly translated as \"ten three\" rather than \"thirteen\" in English.\n\nnamesAndNumbers = {\n \"on\": 10,\n \"yirmi\": 20,\n \"otuz\": 30,\n \"kırk\": 40,\n \"elli\": 50,\n \"altmış\": 60,\n \"yetmiş\": 70,\n \"seksen\": 80,\n \"doksan\": 90,\n \"sıfır\": 0,\n \"bir\": 1,\n \"iki\": 2,\n \"üç\": 3,\n \"dört\": 4,\n \"beş\": 5,\n \"altı\": 6,\n \"yedi\": 7,\n \"sekiz\": 8,\n \"dokuz\": 9,\n\n}\n\n\ndef get_turkish_number(num):\n turkishNumber = []\n number = str(num)\n if num < 10 or num % 10 == 0:\n for name in namesAndNumbers:\n if num == namesAndNumbers.get(name):\n turkishNumber.append(name)\n else:\n tens = num - int(number[1])\n print(tens)\n ones = num - tens\n\n for name in namesAndNumbers:\n if tens == namesAndNumbers.get(name) or ones == namesAndNumbers.get(name):\n turkishNumber.append(name)\n\n return \" \".join(turkishNumber)\n\n\n# KYU 7\n# Given a string \"abc\" and assuming that each letter in the string has a value equal to its\n# position in the alphabet, our string will have a value of 1 + 2 + 3 = 6. This means that:\n# a = 1, b = 2, c = 3 ....z = 26.\n\n# You will be given a list of strings and your task will be to return the values of the strings\n# as explained above multiplied by the position of that string in the list. For our purpose\n# position begins with 1.\n\n# nameValue[\"abc\", \"abc abc\"] should return [6, 24] because of[6 * 1, 12 * 2]. Note how spaces\n# are ignored.\n\n# \"abc\" has a value of 6, while \"abc abc\" has a value of 12. 
Now, the value at position 1 is\n# multiplied by 1 while the value at position 2 is multiplied by 2.\n\nalphabet = \"abcdefghijklmnopqrstuvwxyz\"\nlistAlphabet = [letter for letter in alphabet]\n\n\ndef name_value(my_list):\n\n points = 0\n pointList = []\n\n for i, el in enumerate(my_list):\n for letter in el:\n if letter is not \" \":\n pointValue = listAlphabet.index(letter) + 1\n points += pointValue\n pointList.append(points * (i + 1))\n points = 0\n return pointList\n\n\n# KYU 6\n# Given n, take the sum of the digits of n. If that value has\n# more than one digit, continue reducing in this way until a single-digit\n# number is produced. This is only applicable to the natural numbers.\n\n# Example:\n# 16 - -> 1 + 6 = 7\n# 942 --> 9 + 4 + 2 = 15 --> 1 + 5 = 6\n# 132189 --> 1 + 3 + 2 + 1 + 8 + 9 = 24 --> 2 + 4 = 6\n# 493193 --> 4 + 9 + 3 + 1 + 9 + 3 = 29 --> 2 + 9 = 11 --> 1 + 1 = 2\n\ndef digital_root(n):\n\n if n < 10:\n return n\n\n sum = 0\n\n numList = [int(num) for num in str(n)]\n for num in numList:\n sum = sum + num\n\n if sum > 9:\n return digital_root(sum)\n else:\n return sum\n" }, { "alpha_fraction": 0.695652186870575, "alphanum_fraction": 0.7101449370384216, "avg_line_length": 26.799999237060547, "blob_id": "5c4f787b0b8d9e689f44ee706696a8350668f3f2", "content_id": "5560cbaee2902aa8ebb5766f15d6cf953f434028", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 138, "license_type": "no_license", "max_line_length": 72, "num_lines": 5, "path": "/10-25-20/underPressure.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "# Code as fast as you can! You need to double the integer and return it.\ndef double_integer(i):\n return i * 2\n\nprint(double_integer(2))" }, { "alpha_fraction": 0.716312050819397, "alphanum_fraction": 0.7570921778678894, "avg_line_length": 42.38461685180664, "blob_id": "3e0efb0cde1e45f273921b21da8047841c502b1c", "content_id": "93037c326800e026d9744de1b2461f57de14004c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 564, "license_type": "no_license", "max_line_length": 94, "num_lines": 13, "path": "/9-28-20/willHeSurvive.py", "repo_name": "MatthewVaccaro/Coding-Challenges", "src_encoding": "UTF-8", "text": "# https: // www.codewars.com/kata/59ca8246d751df55cc00014c/train/python\n# A hero is on his way to the castle to complete his mission. However, he's been told that the\n# castle is surrounded with a couple of powerful dragons! each dragon takes 2 bullets to be\n# defeated, our hero has no idea how many bullets he should carry.. Assuming he's gonna grab a\n# specific given number of bullets and move forward to fight another specific given number of\n# dragons, will he survive?\n\n\ndef hero(bullets, dragons):\n return bullets // 2 >= dragons\n\n\nprint(hero(100, 40))\n" } ]
31
kshithijiyer/TensorNetwork
https://github.com/kshithijiyer/TensorNetwork
282ce53ff90aada40a0a3abf75354b5da7c29ced
bf47f8635eca33edf95c73d50d48d861f628aaec
a3a17dcbbb551df3f574277662e116611b03c866
refs/heads/master
2020-05-20T06:49:12.302953
2019-05-06T22:01:28
2019-05-06T22:01:28
185,437,815
0
0
Apache-2.0
2019-05-07T16:14:29
2019-05-07T15:37:23
2019-05-06T22:01:29
null
[ { "alpha_fraction": 0.5827028751373291, "alphanum_fraction": 0.617685854434967, "avg_line_length": 35.242767333984375, "blob_id": "fae2d3c632a76f87b7ea65586c3979265e304828", "content_id": "08bbac19c3cfc622fe3a5e1456792f96366d21a9", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 28814, "license_type": "permissive", "max_line_length": 77, "num_lines": 795, "path": "/tensornetwork/tensornetwork_test.py", "repo_name": "kshithijiyer/TensorNetwork", "src_encoding": "UTF-8", "text": "# Copyright 2019 The TensorNetwork Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport numpy as np\nimport tensorflow as tf\n# Prepare for TF 2.0 migration\ntf.enable_v2_behavior()\n# pylint: disable=g-import-not-at-top\nfrom tensornetwork import tensornetwork\n\n\nclass NetworkTest(tf.test.TestCase):\n\n def test_sanity_check(self):\n net = tensornetwork.TensorNetwork()\n net.add_node(np.eye(2), \"a\")\n net.check_correct()\n\n def test_node_names(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.eye(2), \"a\", axis_names=[\"e0\", \"e1\"])\n self.assertEqual(a.name, \"a\")\n self.assertEqual(a[0].name, \"e0\")\n self.assertEqual(a[1].name, \"e1\")\n\n def test_single_contract(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.array([1.0] * 5), \"a\")\n b = net.add_node(np.array([1.0] * 5), \"b\")\n e = net.connect(a[0], b[0])\n c = net.contract(e)\n net.check_correct()\n val = c.get_tensor().numpy()\n self.assertAlmostEqual(val, 5.0)\n\n def test_disconnect_edge(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.array([1.0] * 5), \"a\")\n b = net.add_node(np.array([1.0] * 5), \"b\")\n e = net.connect(a[0], b[0])\n self.assertFalse(e.is_dangling())\n dangling_edge_1, dangling_edge_2 = net.disconnect(e)\n net.check_correct(check_connected=False)\n self.assertTrue(dangling_edge_1.is_dangling())\n self.assertTrue(dangling_edge_2.is_dangling())\n self.assertEqual(a.get_edge(0), dangling_edge_1)\n self.assertEqual(b.get_edge(0), dangling_edge_2)\n\n def test_set_tensor(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.ones(2))\n self.assertAllClose(a.get_tensor(), np.ones(2))\n a.set_tensor(np.zeros(2))\n self.assertAllClose(a.get_tensor(), np.zeros(2))\n\n def test_has_nondangling_edge(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.ones(2))\n self.assertFalse(a.has_nondangling_edge())\n b = net.add_node(np.ones((2, 2)))\n net.connect(b[0], b[1])\n self.assertTrue(b.has_nondangling_edge())\n\n def test_large_nodes(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.zeros([5, 6, 7, 8, 9]), \"a\")\n b = net.add_node(np.zeros([5, 6, 7, 8, 9]), \"b\")\n for i in range(5):\n net.connect(a[i], b[i])\n net.check_correct()\n\n def test_small_matmul(self):\n net = tensornetwork.TensorNetwork()\n a = 
net.add_node(np.zeros([10, 10]), name=\"a\")\n b = net.add_node(np.zeros([10, 10]), name=\"b\")\n edge = net.connect(a[0], b[0], \"edge\")\n net.check_correct()\n c = net.contract(edge, name=\"a * b\")\n self.assertEqual(c.get_tensor().shape, [10, 10])\n net.check_correct()\n\n def test_direct_trace(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.ones([10, 10]), name=\"a\")\n edge = net.connect(a[0], a[1], \"edge\")\n net.check_correct()\n result = net._contract_trace(edge)\n net.check_correct()\n self.assertAlmostEqual(result.get_tensor().numpy(), 10.0)\n\n def test_double_trace(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.ones([10, 10, 10, 10]), name=\"a\")\n edge1 = net.connect(a[0], a[1], \"edge1\")\n edge2 = net.connect(a[2], a[3], \"edge2\")\n net.check_correct()\n net._contract_trace(edge1)\n net.check_correct()\n val = net._contract_trace(edge2)\n net.check_correct()\n self.assertAlmostEqual(val.get_tensor().numpy(), 100.0)\n\n def test_indirect_trace(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.ones([10, 10]), name=\"a\")\n edge = net.connect(a[0], a[1], \"edge\")\n net.check_correct()\n val = net.contract(edge)\n net.check_correct()\n self.assertAlmostEqual(val.get_tensor().numpy(), 10.0)\n\n def test_real_physics(self):\n # Calcuate the expected value in numpy\n a_vals = np.ones([2, 3, 4, 5])\n b_vals = np.ones([4, 6, 7])\n c_vals = np.ones([5, 6, 8])\n contract1 = np.tensordot(a_vals, b_vals, [[2], [0]])\n contract2 = np.tensordot(c_vals, contract1, [[0], [2]])\n final_result = np.trace(contract2, axis1=0, axis2=4)\n # Build the network\n net = tensornetwork.TensorNetwork()\n a = net.add_node(a_vals, name=\"T\")\n b = net.add_node(b_vals, name=\"A\")\n c = net.add_node(c_vals, name=\"B\")\n e1 = net.connect(a[2], b[0], \"edge\")\n e2 = net.connect(c[0], a[3], \"edge2\")\n e3 = net.connect(b[1], c[1], \"edge3\")\n net.check_correct()\n node_result = net.contract(e1)\n self.assertAllClose(node_result.get_tensor(), contract1)\n net.check_correct()\n node_result = net.contract(e2)\n self.assertAllClose(node_result.get_tensor(), contract2)\n net.check_correct()\n val = net.contract(e3)\n net.check_correct()\n self.assertAllClose(val.get_tensor(), final_result)\n\n def test_real_physics_with_tensors(self):\n # Calcuate the expected value in numpy\n a_vals = np.ones([2, 3, 4, 5])\n b_vals = np.ones([4, 6, 7])\n c_vals = np.ones([5, 6, 8])\n contract1 = np.tensordot(a_vals, b_vals, [[2], [0]])\n contract2 = np.tensordot(c_vals, contract1, [[0], [2]])\n final_result = np.trace(contract2, axis1=0, axis2=4)\n # Build the network\n net = tensornetwork.TensorNetwork()\n a = net.add_node(tf.ones([2, 3, 4, 5]), name=\"T\")\n b = net.add_node(tf.ones([4, 6, 7]), name=\"A\")\n c = net.add_node(tf.ones([5, 6, 8]), name=\"B\")\n e1 = net.connect(a[2], b[0], \"edge\")\n e2 = net.connect(c[0], a[3], \"edge2\")\n e3 = net.connect(b[1], c[1], \"edge3\")\n net.check_correct()\n node_result = net.contract(e1)\n self.assertAllClose(node_result.get_tensor(), contract1)\n net.check_correct()\n node_result = net.contract(e2)\n self.assertAllClose(node_result.get_tensor(), contract2)\n net.check_correct()\n val = net.contract(e3)\n net.check_correct()\n self.assertAllClose(val.get_tensor(), final_result)\n\n def test_real_physics_naive_contraction(self):\n # Calcuate the expected value in numpy\n a_vals = np.ones([2, 3, 4, 5])\n b_vals = np.ones([4, 6, 7])\n c_vals = np.ones([5, 6, 8])\n contract1 = np.tensordot(a_vals, b_vals, [[2], [0]])\n 
contract2 = np.tensordot(c_vals, contract1, [[0], [2]])\n final_result = np.trace(contract2, axis1=0, axis2=4)\n # Build the network\n net = tensornetwork.TensorNetwork()\n a = net.add_node(tf.ones([2, 3, 4, 5]), name=\"T\")\n b = net.add_node(tf.ones([4, 6, 7]), name=\"A\")\n c = net.add_node(tf.ones([5, 6, 8]), name=\"B\")\n e1 = net.connect(a[2], b[0], \"edge\")\n e2 = net.connect(c[0], a[3], \"edge2\")\n e3 = net.connect(b[1], c[1], \"edge3\")\n for edge in [e1, e2, e3]:\n net.contract(edge)\n val = net.get_final_node()\n self.assertEqual(val.get_tensor().shape, [8, 2, 3, 7])\n self.assertAllClose(val.get_tensor(), final_result)\n\n def test_with_tensors(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(tf.eye(2) * 2, name=\"T\")\n b = net.add_node(tf.eye(2) * 3, name=\"A\")\n e1 = net.connect(a[0], b[0], \"edge\")\n e2 = net.connect(a[1], b[1], \"edge2\")\n net.check_correct()\n net.contract(e1)\n net.check_correct()\n val = net.contract(e2)\n net.check_correct()\n self.assertAlmostEqual(val.get_tensor().numpy(), 12.0)\n\n def test_contract_dangling_edge(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.array([1]))\n e = a[0]\n with self.assertRaises(ValueError):\n net.contract(e)\n\n def test_double_edge_contract(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.eye(2))\n e = net.connect(a[0], a[1], name=\"edge\")\n net.contract(e)\n with self.assertRaises(ValueError):\n net.contract(e)\n\n def test_contract_trace_dangling_edge(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.array([1]))\n e = a[0]\n with self.assertRaises(ValueError):\n net._contract_trace(e)\n\n def test_node2_contract_trace(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.zeros([3, 3, 1]))\n b = net.add_node(np.zeros([1]))\n net.connect(b[0], a[2])\n trace_edge = net.connect(a[0], a[1])\n net._contract_trace(trace_edge)\n net.check_correct()\n\n def test_contract_fall_through_name(self):\n net = tensornetwork.TensorNetwork()\n node = net.add_node(np.eye(2), name=\"Identity Matrix\")\n self.assertEqual(node.name, \"Identity Matrix\")\n edge = net.connect(node[0], node[1], name=\"Trace Edge\")\n self.assertEqual(edge.name, \"Trace Edge\")\n final_result = net.contract(edge, name=\"Trace Of Identity\")\n self.assertEqual(final_result.name, \"Trace Of Identity\")\n\n def test_non_connected(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.array([2, 2]))\n b = net.add_node(np.array([2, 2]))\n net.connect(a[0], b[0])\n c = net.add_node(np.array([2, 2]))\n d = net.add_node(np.array([2, 2]))\n net.connect(c[0], d[0])\n with self.assertRaises(ValueError):\n net.check_connected()\n\n def test_node_get_dim_bad_axis(self):\n node = tensornetwork.Node(np.eye(2), \"a\", axis_names=[\"1\", \"2\"])\n with self.assertRaises(ValueError):\n node.get_dimension(10)\n\n def test_bad_trace_contract(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.array([2]))\n b = net.add_node(np.array([2]))\n e = net.connect(a[0], b[0])\n with self.assertRaises(ValueError):\n net._contract_trace(e)\n\n def test_double_edge_axis(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.array([2]), name=\"a\")\n b = net.add_node(np.array([2]), name=\"b\")\n net.connect(a[0], b[0])\n with self.assertRaises(ValueError):\n net.connect(a[0], b[0])\n\n def test_named_axis(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.eye(2), axis_names=[\"alpha\", \"beta\"])\n e = net.connect(a[\"alpha\"], a[\"beta\"])\n b = 
net.contract(e)\n self.assertAlmostEqual(b.get_tensor().numpy(), 2.0)\n\n def test_mixed_named_axis(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.eye(2) * 2.0, axis_names=[\"alpha\", \"beta\"])\n b = net.add_node(np.eye(2) * 3.0)\n e1 = net.connect(a[\"alpha\"], b[0])\n # Axes should still be indexable by numbers even with naming.\n e2 = net.connect(a[1], b[1])\n net.contract(e1)\n result = net.contract(e2)\n self.assertAlmostEqual(result.get_tensor().numpy(), 12.0)\n\n def test_duplicate_name(self):\n net = tensornetwork.TensorNetwork()\n with self.assertRaises(ValueError):\n net.add_node(np.eye(2), axis_names=[\"test\", \"test\"])\n\n def test_bad_axis_name_length(self):\n net = tensornetwork.TensorNetwork()\n with self.assertRaises(ValueError):\n # This should have 2 names, not 1.\n net.add_node(np.eye(2), axis_names=[\"need_2_names\"])\n\n def test_bad_axis_name_connect(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.eye(2), axis_names=[\"test\", \"names\"])\n with self.assertRaises(ValueError):\n a.get_edge(\"bad_name\")\n\n def test_node_edge_ordering(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.zeros((2, 3, 4)))\n e2 = a[0]\n e3 = a[1]\n e4 = a[2]\n self.assertEqual(a.get_tensor().shape, (2, 3, 4))\n a.reorder_edges([e4, e2, e3])\n net.check_correct()\n self.assertEqual(a.get_tensor().shape, (4, 2, 3))\n self.assertEqual(e2.axis1, 1)\n self.assertEqual(e3.axis1, 2)\n self.assertEqual(e4.axis1, 0)\n\n def test_trace_edge_ordering(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.zeros((2, 2, 3)))\n e2 = net.connect(a[1], a[0])\n e3 = a[2]\n with self.assertRaises(ValueError):\n a.reorder_edges([e2, e3])\n\n def test_mismatch_edge_ordering(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.zeros((2, 3)))\n e2_a = a[0]\n b = net.add_node(np.zeros((2,)))\n e_b = b[0]\n with self.assertRaises(ValueError):\n a.reorder_edges([e2_a, e_b])\n\n def test_complicated_edge_reordering(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.zeros((2, 3, 4)))\n b = net.add_node(np.zeros((2, 5)))\n c = net.add_node(np.zeros((3,)))\n d = net.add_node(np.zeros((4, 5)))\n e_ab = net.connect(a[0], b[0])\n e_bd = net.connect(b[1], d[1])\n e_ac = net.connect(a[1], c[0])\n e_ad = net.connect(a[2], d[0])\n net.contract(e_bd)\n a.reorder_edges([e_ac, e_ab, e_ad])\n net.check_correct()\n self.assertEqual(a.get_tensor().shape, (3, 2, 4))\n\n def test_edge_reorder_axis_names(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.zeros((2, 3, 4, 5)), axis_names=[\"a\", \"b\", \"c\", \"d\"])\n edge_a = a[\"a\"]\n edge_b = a[\"b\"]\n edge_c = a[\"c\"]\n edge_d = a[\"d\"]\n a.reorder_edges([edge_c, edge_b, edge_d, edge_a])\n self.assertEqual(a.get_tensor().shape, (4, 3, 5, 2))\n self.assertEqual(a.axis_names, [\"c\", \"b\", \"d\", \"a\"])\n\n def test_outer_product(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.ones((2, 4, 5)), name=\"A\")\n b = net.add_node(np.ones((4, 3, 6)), name=\"B\")\n c = net.add_node(np.ones((3, 2)), name=\"C\")\n net.connect(a[1], b[0])\n net.connect(a[0], c[1])\n net.connect(b[1], c[0])\n # Purposely leave b's 3rd axis undefined.\n d = net.outer_product(a, b, name=\"D\")\n net.check_correct()\n self.assertEqual(d.get_tensor().shape, (2, 4, 5, 4, 3, 6))\n self.assertAllClose(d.get_tensor().numpy(), np.ones((2, 4, 5, 4, 3, 6)))\n self.assertEqual(d.name, \"D\")\n\n def test_outer_product_final_nodes(self):\n net = tensornetwork.TensorNetwork()\n edges = 
[]\n for i in range(1, 5):\n edges.append(net.add_node(tf.ones(i))[0])\n final_node = net.outer_product_final_nodes(edges)\n self.assertAllClose(final_node.get_tensor(), np.ones([1, 2, 3, 4]))\n self.assertEqual(final_node.get_all_edges(), edges)\n\n def test_outer_product_final_nodes_not_contracted(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.ones(2))\n b = net.add_node(np.ones(2))\n e = net.connect(a[0], b[0])\n with self.assertRaises(ValueError):\n net.outer_product_final_nodes([e])\n\n def test_add_axis_names(self):\n a = tensornetwork.Node(np.eye(2), \"A\", [\"ignore1\", \"ignore2\"])\n a.add_axis_names([\"a\", \"b\"])\n self.assertEqual(a.axis_names, [\"a\", \"b\"])\n\n def test_reorder_axes(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.zeros((2, 3, 4)))\n b = net.add_node(np.zeros((3, 4, 5)))\n c = net.add_node(np.zeros((2, 4, 5)))\n net.connect(a[0], c[0])\n net.connect(b[0], a[1])\n net.connect(a[2], c[1])\n net.connect(b[2], c[2])\n a.reorder_axes([2, 0, 1])\n net.check_correct()\n self.assertEqual(a.get_tensor().shape, (4, 2, 3))\n\n def test_flattening_standard_edges(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.zeros((2, 3, 5)), name=\"A\")\n b = net.add_node(np.zeros((2, 3, 4, 5)), name=\"B\")\n e1 = net.connect(a[0], b[0], \"Edge_1_1\")\n e2 = net.connect(a[2], b[3], \"Edge_2_3\")\n edge_a_1 = a[1]\n edge_b_1 = b[1]\n edge_b_2 = b[2]\n new_edge = net.flatten_edges([e1, e2], new_edge_name=\"New Edge\")\n self.assertEqual(a.get_tensor().shape, (3, 10))\n self.assertEqual(b.get_tensor().shape, (3, 4, 10))\n self.assertEqual(a.edges, [edge_a_1, new_edge])\n self.assertEqual(b.edges, [edge_b_1, edge_b_2, new_edge])\n net.check_correct()\n\n def test_flattening_dangling_edges(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.zeros((2, 3, 4, 5)), name=\"A\")\n e1 = a[0]\n e2 = a[1]\n e3 = a[2]\n e4 = a[3]\n flattened_edge = net.flatten_edges([e1, e3], new_edge_name=\"New Edge\")\n self.assertEqual(a.get_tensor().shape, (3, 5, 8))\n self.assertEqual(a.edges, [e2, e4, flattened_edge])\n self.assertEqual(flattened_edge.name, \"New Edge\")\n net.check_correct()\n\n def test_flatten_edges_different_nodes(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.eye(2))\n b = net.add_node(np.eye(2))\n c = net.add_node(np.eye(2))\n e1 = net.connect(a[0], b[0])\n e2 = net.connect(a[1], c[0])\n net.connect(b[1], c[1])\n with self.assertRaises(ValueError):\n net.flatten_edges([e1, e2])\n\n def test_flatten_trace_edges(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.zeros((2, 3, 4, 3, 5, 5)))\n c = net.add_node(np.zeros((2, 4)))\n e1 = net.connect(a[1], a[3])\n e2 = net.connect(a[4], a[5])\n external_1 = net.connect(a[0], c[0])\n external_2 = net.connect(c[1], a[2])\n new_edge = net.flatten_edges([e1, e2], \"New Edge\")\n net.check_correct()\n self.assertEqual(a.get_tensor().shape, (2, 4, 15, 15))\n self.assertEqual(a.edges, [external_1, external_2, new_edge, new_edge])\n self.assertEqual(new_edge.name, \"New Edge\")\n\n def test_flatten_consistent_result(self):\n net_noflat = tensornetwork.TensorNetwork()\n a_val = np.random.normal(size=(3, 5, 5, 6))\n b_val = np.random.normal(size=(5, 6, 4, 5))\n # Create non flattened example to compare against.\n a_noflat = net_noflat.add_node(a_val)\n b_noflat = net_noflat.add_node(b_val)\n e1 = net_noflat.connect(a_noflat[1], b_noflat[3])\n e2 = net_noflat.connect(a_noflat[3], b_noflat[1])\n e3 = net_noflat.connect(a_noflat[2], b_noflat[0])\n 
a_dangling_noflat = a_noflat[0]\n b_dangling_noflat = b_noflat[2]\n for edge in [e1, e2, e3]:\n net_noflat.contract(edge)\n noflat_result_node = net_noflat.get_final_node()\n noflat_result_node.reorder_edges([a_dangling_noflat, b_dangling_noflat])\n noflat_result = noflat_result_node.get_tensor().numpy()\n # Create network with flattening\n net_flat = tensornetwork.TensorNetwork()\n a_flat = net_flat.add_node(a_val)\n b_flat = net_flat.add_node(b_val)\n e1 = net_flat.connect(a_flat[1], b_flat[3])\n e2 = net_flat.connect(a_flat[3], b_flat[1])\n e3 = net_flat.connect(a_flat[2], b_flat[0])\n a_dangling_flat = a_flat[0]\n b_dangling_flat = b_flat[2]\n final_edge = net_flat.flatten_edges([e1, e2, e3])\n flat_result_node = net_flat.contract(final_edge)\n flat_result_node.reorder_edges([a_dangling_flat, b_dangling_flat])\n flat_result = flat_result_node.get_tensor().numpy()\n self.assertAllClose(flat_result, noflat_result)\n\n def test_flatten_consistent_tensor(self):\n net = tensornetwork.TensorNetwork()\n a_val = np.random.normal(size=(2, 3, 4, 5))\n b_val = np.random.normal(size=(3, 5, 4, 2))\n a = net.add_node(a_val)\n b = net.add_node(b_val)\n e1 = net.connect(a[0], b[3])\n e2 = net.connect(b[1], a[3])\n e3 = net.connect(a[1], b[0])\n net.flatten_edges([e3, e1, e2])\n net.check_correct()\n\n # Check expected values.\n a_final = np.reshape(np.transpose(a_val, (2, 1, 0, 3)), (4, 30))\n b_final = np.reshape(np.transpose(b_val, (2, 0, 3, 1)), (4, 30))\n self.assertAllClose(a.get_tensor().numpy(), a_final)\n self.assertAllClose(b.get_tensor().numpy(), b_final)\n\n def test_flatten_trace_consistent_result(self):\n net_noflat = tensornetwork.TensorNetwork()\n a_val = np.random.normal(size=(5, 6, 6, 7, 5, 7))\n a_noflat = net_noflat.add_node(a_val)\n e1 = net_noflat.connect(a_noflat[0], a_noflat[4])\n e2 = net_noflat.connect(a_noflat[1], a_noflat[2])\n e3 = net_noflat.connect(a_noflat[3], a_noflat[5])\n for edge in [e1, e2, e3]:\n net_noflat.contract(edge)\n noflat_result = net_noflat.get_final_node().get_tensor().numpy()\n # Create network with flattening\n net_flat = tensornetwork.TensorNetwork()\n a_flat = net_flat.add_node(a_val)\n e1 = net_flat.connect(a_flat[0], a_flat[4])\n e2 = net_flat.connect(a_flat[1], a_flat[2])\n e3 = net_flat.connect(a_flat[3], a_flat[5])\n final_edge = net_flat.flatten_edges([e1, e2, e3])\n flat_result = net_flat.contract(final_edge).get_tensor().numpy()\n self.assertAllClose(flat_result, noflat_result)\n\n def test_flatten_trace_consistent_tensor(self):\n net = tensornetwork.TensorNetwork()\n a_val = np.random.normal(size=(5, 3, 4, 4, 5))\n a = net.add_node(a_val)\n e1 = net.connect(a[0], a[4])\n e2 = net.connect(a[3], a[2])\n net.flatten_edges([e2, e1])\n net.check_correct()\n # Check expected values.\n a_final = np.reshape(np.transpose(a_val, (1, 2, 0, 3, 4)), (3, 20, 20))\n self.assertAllClose(a.get_tensor().numpy(), a_final)\n\n def test_add_subnetwork(self):\n net1 = tensornetwork.TensorNetwork()\n net2 = tensornetwork.TensorNetwork()\n a = net1.add_node(np.eye(2) * 2)\n b = net1.add_node(np.eye(2) * 3)\n e1 = net1.connect(a[0], b[0])\n c = net2.add_node(np.eye(2) * 4)\n net2.add_subnetwork(net1)\n self.assertIn(a, net2.nodes_set)\n self.assertIn(b, net2.nodes_set)\n e2 = net2.connect(c[0], a[1])\n e3 = net2.connect(c[1], b[1])\n net2.check_correct()\n for edge in [e1, e2, e3]:\n net2.contract(edge)\n result = net2.get_final_node()\n self.assertAllClose(result.get_tensor().numpy(), 48.0)\n\n def test_merge_networks(self):\n net1 = tensornetwork.TensorNetwork()\n 
net2 = tensornetwork.TensorNetwork()\n a = net1.add_node(np.eye(2) * 2)\n b = net1.add_node(np.eye(2) * 3)\n e1 = net1.connect(a[0], b[0])\n c = net2.add_node(np.eye(2) * 4)\n net3 = tensornetwork.TensorNetwork.merge_networks([net1, net2])\n self.assertIn(a, net3.nodes_set)\n self.assertIn(b, net3.nodes_set)\n e2 = net3.connect(c[0], a[1])\n e3 = net3.connect(c[1], b[1])\n net3.check_correct()\n for edge in [e1, e2, e3]:\n net3.contract(edge)\n result = net3.get_final_node()\n self.assertAllClose(result.get_tensor().numpy(), 48.0)\n\n def test_flatten_edges_between(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.ones((3, 4, 5)))\n b = net.add_node(np.ones((5, 4, 3)))\n net.connect(a[0], b[2])\n net.connect(a[1], b[1])\n net.connect(a[2], b[0])\n net.flatten_edges_between(a, b)\n net.check_correct()\n self.assertAllClose(a.get_tensor().numpy(), np.ones((60,)))\n self.assertAllClose(b.get_tensor().numpy(), np.ones((60,)))\n\n def test_flatten_edges_between_no_edges(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.ones((3)))\n b = net.add_node(np.ones((3)))\n self.assertEqual(net.flatten_edges_between(a, b), None)\n\n def test_flatten_all_edges(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.ones((3, 3, 5, 6, 2, 2)))\n b = net.add_node(np.ones((5, 6, 7)))\n c = net.add_node(np.ones((7,)))\n trace_edge1 = net.connect(a[0], a[1])\n trace_edge2 = net.connect(a[4], a[5])\n split_edge1 = net.connect(a[2], b[0])\n split_edge2 = net.connect(a[3], b[1])\n ok_edge = net.connect(b[2], c[0])\n flat_edges = net.flatten_all_edges()\n net.check_correct()\n self.assertEqual(len(flat_edges), 3)\n self.assertNotIn(trace_edge1, flat_edges)\n self.assertNotIn(trace_edge2, flat_edges)\n self.assertNotIn(split_edge1, flat_edges)\n self.assertNotIn(split_edge2, flat_edges)\n self.assertIn(ok_edge, flat_edges)\n\n def test_contract_between(self):\n net = tensornetwork.TensorNetwork()\n a_val = np.random.normal(size=(2, 3, 4, 5))\n b_val = np.random.normal(size=(3, 5, 4, 2))\n a = net.add_node(a_val)\n b = net.add_node(b_val)\n net.connect(a[0], b[3])\n net.connect(b[1], a[3])\n net.connect(a[1], b[0])\n edge_a = a[2]\n edge_b = b[2]\n c = net.contract_between(a, b, name=\"New Node\")\n c.reorder_edges([edge_a, edge_b])\n net.check_correct()\n # Check expected values.\n a_flat = np.reshape(np.transpose(a_val, (2, 1, 0, 3)), (4, 30))\n b_flat = np.reshape(np.transpose(b_val, (2, 0, 3, 1)), (4, 30))\n final_val = np.matmul(a_flat, b_flat.T)\n self.assertAllClose(c.get_tensor().numpy(), final_val)\n self.assertEqual(c.name, \"New Node\")\n\n def test_contract_between_outer_product(self):\n net = tensornetwork.TensorNetwork()\n a_val = np.random.normal(size=(2, 3, 4))\n b_val = np.random.normal(size=(5, 6, 7))\n a = net.add_node(a_val)\n b = net.add_node(b_val)\n c = net.contract_between(a, b, allow_outer_product=True)\n self.assertEqual(c.get_tensor().shape, (2, 3, 4, 5, 6, 7))\n\n def test_contract_between_no_outer_product(self):\n net = tensornetwork.TensorNetwork()\n a_val = np.random.normal(size=(2, 3, 4))\n b_val = np.random.normal(size=(5, 6, 7))\n a = net.add_node(a_val)\n b = net.add_node(b_val)\n with self.assertRaises(ValueError):\n net.contract_between(a, b)\n\n def test_contract_between_trace_edges(self):\n net = tensornetwork.TensorNetwork()\n a_val = np.random.normal(size=(3, 3))\n final_val = np.trace(a_val)\n a = net.add_node(a_val)\n net.connect(a[0], a[1])\n b = net.contract_between(a, a)\n net.check_correct()\n 
self.assertAllClose(b.get_tensor().numpy(), final_val)\n\n def test_join_dangling(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.ones((3,)))\n b = net.add_node(np.ones((3,)))\n net.connect(a[0], b[0])\n net.check_correct()\n\n def test_dynamic_network_sizes(self):\n\n @tf.contrib.eager.defun\n def f(x, n):\n x_slice = x[:n]\n net = tensornetwork.TensorNetwork()\n n1 = net.add_node(x_slice)\n n2 = net.add_node(x_slice)\n e = net.connect(n1[0], n2[0])\n return net.contract(e).get_tensor()\n\n x = tf.ones(10)\n self.assertAllClose(f(x, tf.convert_to_tensor(2)), 2.0)\n self.assertAllClose(f(x, tf.convert_to_tensor(3)), 3.0)\n\n def test_dynamic_network_sizes_flatten_standard(self):\n\n @tf.contrib.eager.defun\n def f(x, n):\n x_slice = x[..., :n]\n net = tensornetwork.TensorNetwork()\n n1 = net.add_node(x_slice)\n n2 = net.add_node(x_slice)\n net.connect(n1[0], n2[0])\n net.connect(n1[1], n2[1])\n net.connect(n1[2], n2[2])\n return net.contract(net.flatten_edges_between(n1, n2)).get_tensor()\n\n x = tf.ones((3, 4, 5))\n self.assertAllClose(f(x, tf.convert_to_tensor(2)), 24.0)\n self.assertAllClose(f(x, tf.convert_to_tensor(3)), 36.0)\n\n def test_dynamic_network_sizes_flatten_trace(self):\n\n @tf.contrib.eager.defun\n def f(x, n):\n x_slice = x[..., :n]\n net = tensornetwork.TensorNetwork()\n n1 = net.add_node(x_slice)\n net.connect(n1[0], n1[2])\n net.connect(n1[1], n1[3])\n return net.contract(net.flatten_edges_between(n1, n1)).get_tensor()\n\n x = tf.ones((3, 4, 3, 4, 5))\n self.assertAllClose(f(x, tf.convert_to_tensor(2)), tf.ones((2,)) * 12)\n self.assertAllClose(f(x, tf.convert_to_tensor(3)), tf.ones((3,)) * 12)\n\n def test_split_node(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(tf.zeros((2, 3, 4, 5, 6)))\n left_edges = []\n for i in range(3):\n left_edges.append(a[i])\n right_edges = []\n for i in range(3, 5):\n right_edges.append(a[i])\n left, right, _ = net.split_node(a, left_edges, right_edges)\n net.check_correct()\n self.assertAllClose(left.get_tensor(), np.zeros((2, 3, 4, 24)))\n self.assertAllClose(right.get_tensor(), np.zeros((24, 5, 6)))\n\n def test_split_node_mixed_order(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(tf.zeros((2, 3, 4, 5, 6)))\n left_edges = []\n for i in [0, 2, 4]:\n left_edges.append(a[i])\n right_edges = []\n for i in [1, 3]:\n right_edges.append(a[i])\n left, right, _ = net.split_node(a, left_edges, right_edges)\n net.check_correct()\n self.assertAllClose(left.get_tensor(), np.zeros((2, 4, 6, 15)))\n self.assertAllClose(right.get_tensor(), np.zeros((15, 3, 5)))\n\n def test_split_node_full_svd(self):\n net = tensornetwork.TensorNetwork()\n random_matrix = np.random.rand(10, 10)\n unitary1, _, unitary2 = np.linalg.svd(random_matrix)\n singular_values = np.array(range(10))\n val = unitary1.dot(np.diag(singular_values).dot(unitary2.T))\n a = net.add_node(val)\n e1 = a[0]\n e2 = a[1]\n _, s, _, _, = net.split_node_full_svd(a, [e1], [e2])\n net.check_correct()\n self.assertAllClose(s.get_tensor(), np.diag(np.arange(9, -1, -1)))\n\n def test_batch_usage(self):\n def build_tensornetwork(tensors):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(tensors[0])\n b = net.add_node(tensors[1])\n e = net.connect(a[0], b[0])\n return net.contract(e).get_tensor()\n\n tensors = [tf.ones((5, 10)), tf.ones((5, 10))]\n result = tf.map_fn(build_tensornetwork, tensors, dtype=tf.float32)\n self.assertAllClose(result, tf.ones(5) * 10)\n\nif __name__ == \"__main__\":\n tf.test.main()\n\n" }, { "alpha_fraction": 
0.6530612111091614, "alphanum_fraction": 0.6734693646430969, "avg_line_length": 36.625, "blob_id": "1a9599dfc67077df2eda90c238586017a8f39f8c", "content_id": "253d7aee2711273e68218527a16d68cf34b1f686", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1813, "license_type": "permissive", "max_line_length": 111, "num_lines": 48, "path": "/experiments/MERA/misc_mera.py", "repo_name": "kshithijiyer/TensorNetwork", "src_encoding": "UTF-8", "text": "\n# Copyright 2019 The TensorNetwork Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport tensorflow as tf\nimport sys\nsys.path.append('../')\nimport numpy as np\nimport ncon as ncon\n\n\ndef pad_tensor(tensor, new_shape):\n paddings = np.zeros((len(tensor.shape),2)).astype(np.int32)\n for n in range(len(new_shape)):\n paddings[n,1] = max(new_shape[n] - tensor.shape[n],0)\n return tf.pad(tensor, paddings)\n\ndef all_same_chi(*tensors):\n chis = [t.shape[n] for t in tensors for n in range(len(t.shape))]\n return np.all([c == chis[0] for c in chis])\n\[email protected]\ndef u_update_svd(wIn):\n shape = wIn.shape\n st, ut,vt = tf.linalg.svd(tf.reshape(wIn,(shape[0] * shape[1], shape[2] * shape[3])), full_matrices=False)\n return -tf.reshape(ncon.ncon([ut, tf.conj(vt)],[[-1,1],[-2,1]]), shape)\n\n\ndef u_update_svd_numpy(wIn):\n shape = wIn.shape\n ut, st, vt = np.linalg.svd(tf.reshape(wIn,(shape[0] * shape[1], shape[2] * shape[3])), full_matrices=False)\n return -tf.reshape(ncon.ncon([ut, vt],[[-1,1],[1, -2]]), shape)\n\[email protected]\ndef w_update_svd(wIn):\n shape = wIn.shape\n st, ut, vt = tf.linalg.svd(tf.reshape(wIn,(shape[0] * shape[1], shape[2])),full_matrices=False)\n return -tf.reshape(ncon.ncon([ut, tf.conj(vt)],[[-1,1],[-2,1]]), shape) \n\n\n" }, { "alpha_fraction": 0.4618397653102875, "alphanum_fraction": 0.49573540687561035, "avg_line_length": 42.00815200805664, "blob_id": "b4df68f3d422b4d1320f5d6571722fa751b2a658", "content_id": "098881caa9082d402ee149cb3560411e4c80ded5", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31656, "license_type": "permissive", "max_line_length": 176, "num_lines": 736, "path": "/experiments/MERA/modified_binary_mera.py", "repo_name": "kshithijiyer/TensorNetwork", "src_encoding": "UTF-8", "text": "\n# Copyright 2019 The TensorNetwork Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n\n\n\"\"\" \nmodified binary MERA optimization\nparts of the following code are based on code written by Glen Evenbly (c) for www.tensors.net, (v1.1) \n\"\"\"\n\nimport sys\nsys.path.append('../')\nNUM_THREADS = 4\nimport os\nos.environ['OMP_NUM_THREADS'] = str(NUM_THREADS)\nimport tensorflow as tf\nimport copy\nimport numpy as np\nimport time\nimport pickle\nimport ncon as ncon\nimport misc_mera\nfrom sys import stdout\n\n\nconfig = tf.ConfigProto()\nconfig.intra_op_parallelism_threads = NUM_THREADS\nconfig.inter_op_parallelism_threads = NUM_THREADS\ntf.enable_eager_execution(config=config)\ntf.enable_v2_behavior()\n\[email protected]\ndef ascending_super_operator(hamAB, hamBA, w_isometry, v_isometry, unitary,\n refsym):\n \n \"\"\"\n ascending super operator for a modified binary MERA\n ascends 'hamAB' and 'hamBA' up one layer\n Parameters:\n -------------------------\n hamAB, hamBA: tf.Tensor\n local Hamiltonian terms\n w_isometry: tf.Tensor\n v_isometry: tf.Tensor\n unitary: tf.Tensor\n refsym: bool \n if true, enforce reflection symmetry\n Returns: \n ------------------------\n (hamABout, hamBAout): tf.Tensor, tf.Tensor\n\n \"\"\"\n \n indList1 = [[6, 4, 1, 2], [1, 3, -3], [6, 7, -1], [2, 5, 3, 9],\n [4, 5, 7, 10], [8, 9, -4], [8, 10, -2]]\n indList2 = [[3, 4, 1, 2], [5, 6, -3], [5, 7, -1], [1, 2, 6, 9],\n [3, 4, 7, 10], [8, 9, -4], [8, 10, -2]]\n indList3 = [[5, 7, 2, 1], [8, 9, -3], [8, 10, -1], [4, 2, 9, 3],\n [4, 5, 10, 6], [1, 3, -4], [7, 6, -2]]\n indList4 = [[3, 6, 2, 5], [2, 1, -3], [3, 1, -1], [5, 4, -4], [6, 4, -2]]\n\n hamBAout = ncon.ncon([\n hamAB, w_isometry,\n tf.conj(w_isometry), unitary,\n tf.conj(unitary), v_isometry,\n tf.conj(v_isometry)\n ], indList1)\n if refsym:\n hamBAout = hamBAout + tf.transpose(hamBAout, (1, 0, 3, 2))\n else:\n hamBAout = hamBAout + ncon.ncon([\n hamAB, w_isometry,\n tf.conj(w_isometry), unitary,\n tf.conj(unitary), v_isometry,\n tf.conj(v_isometry)\n ], indList3)\n\n hamBAout = hamBAout + ncon.ncon([\n hamBA, w_isometry,\n tf.conj(w_isometry), unitary,\n tf.conj(unitary), v_isometry,\n tf.conj(v_isometry)\n ], indList2)\n hamABout = ncon.ncon([\n hamBA, v_isometry,\n tf.conj(v_isometry), w_isometry,\n tf.conj(w_isometry)\n ], indList4)\n\n return hamABout, hamBAout\n\n\[email protected]\ndef descending_super_operator(rhoAB, rhoBA, w_isometry, v_isometry, unitary,\n refsym):\n \"\"\"\n descending super operator for a modified binary MERA\n \"\"\"\n\n indList1 = [[9, 3, 4, 2], [-3, 5, 4], [-1, 10, 9], [-4, 7, 5, 6],\n [-2, 7, 10, 8], [1, 6, 2], [1, 8, 3]]\n indList2 = [[3, 6, 2, 5], [1, 7, 2], [1, 9, 3], [-3, -4, 7, 8],\n [-1, -2, 9, 10], [4, 8, 5], [4, 10, 6]]\n indList3 = [[3, 9, 2, 4], [1, 5, 2], [1, 8, 3], [7, -3, 5, 6],\n [7, -1, 8, 10], [-4, 6, 4], [-2, 10, 9]]\n indList4 = [[3, 6, 2, 5], [-3, 1, 2], [-1, 1, 3], [-4, 4, 5], [-2, 4, 6]]\n\n rhoABout = 0.5 * ncon.ncon([\n rhoBA, w_isometry,\n tf.conj(w_isometry), unitary,\n tf.conj(unitary), v_isometry,\n tf.conj(v_isometry)\n ], indList1)\n \n if refsym:\n rhoABout = rhoABout + tf.transpose(rhoABout, (1, 0, 3, 2))\n else:\n rhoABout = rhoABout + 0.5 * ncon.ncon([\n rhoBA, w_isometry,\n tf.conj(w_isometry), unitary,\n tf.conj(unitary), v_isometry,\n tf.conj(v_isometry)\n ], indList3)\n\n rhoBAout = 0.5 * ncon.ncon([\n rhoBA, w_isometry,\n tf.conj(w_isometry), unitary,\n tf.conj(unitary), v_isometry,\n tf.conj(v_isometry)\n ], indList2)\n\n rhoBAout = rhoBAout + 0.5 * ncon.ncon([\n rhoAB, v_isometry,\n tf.conj(v_isometry), w_isometry,\n tf.conj(w_isometry)\n ], indList4)\n\n 
return rhoABout, rhoBAout\n\n\[email protected]\ndef get_env_disentangler(hamAB,hamBA,rhoBA,w,v,u,refsym):\n\n indList1 = [[7,8,10,-1],[4,3,9,2],[10,-3,9],[7,5,4],[8,-2,5,6],[1,-4,2],[1,6,3]]\n indList2 = [[7,8,-1,-2],[3,6,2,5],[1,-3,2],[1,9,3],[7,8,9,10],[4,-4,5],[4,10,6]]\n indList3 = [[7,8,-2,10],[3,4,2,9],[1,-3,2],[1,5,3],[-1,7,5,6],[10,-4,9],[8,6,4]]\n\n uEnv = ncon.ncon([hamAB,rhoBA,w,tf.conj(w),tf.conj(u),v,tf.conj(v)],indList1)\n if refsym:\n uEnv = uEnv + tf.transpose(uEnv,(1,0,3,2))\n else:\n uEnv = uEnv + ncon.ncon([hamAB,rhoBA,w,tf.conj(w),tf.conj(u),v,tf.conj(v)],indList3)\n \n uEnv = uEnv + ncon.ncon([hamBA,rhoBA,w,tf.conj(w),tf.conj(u),v,tf.conj(v)],indList2)\n\n return uEnv\n\[email protected]\ndef get_env_w_isometry(hamAB, hamBA, rhoBA, rhoAB, w_isometry, v_isometry, unitary):\n \"\"\"\n Parameters:\n \"\"\"\n indList1 = [[7,8,-1,9],[4,3,-3,2],[7,5,4],[9,10,-2,11],[8,10,5,6],[1,11,2],[1,6,3]]\n indList2 = [[1,2,3,4],[10,7,-3,6],[-1,11,10],[3,4,-2,8],[1,2,11,9],[5,8,6],[5,9,7]]\n indList3 = [[5,7,3,1],[10,9,-3,8],[-1,11,10],[4,3,-2,2],[4,5,11,6],[1,2,8],[7,6,9]]\n indList4 = [[3,7,2,-1],[5,6,4,-3],[2,1,4],[3,1,5],[7,-2,6]]\n\n wEnv = ncon.ncon([hamAB,rhoBA,tf.conj(w_isometry),unitary,tf.conj(unitary),v_isometry,tf.conj(v_isometry)],\n indList1)\n wEnv = wEnv + ncon.ncon([hamBA,rhoBA,tf.conj(w_isometry),unitary,tf.conj(unitary),v_isometry,tf.conj(v_isometry)],\n indList2)\n \n wEnv = wEnv + ncon.ncon([hamAB,rhoBA,tf.conj(w_isometry),unitary,tf.conj(unitary),v_isometry,tf.conj(v_isometry)],\n indList3)\n \n wEnv = wEnv + ncon.ncon([hamBA,rhoAB,v_isometry,tf.conj(v_isometry),tf.conj(w_isometry)],\n indList4)\n\n return wEnv\n\[email protected]\ndef get_env_v_isometry(hamAB, hamBA, rhoBA, rhoAB, w_isometry, v_isometry, unitary):\n\n indList1 = [[6,4,1,3],[9,11,8,-3],[1,2,8],[6,7,9],[3,5,2,-2],[4,5,7,10],[-1,10,11]]\n indList2 = [[3,4,1,2],[8,10,9,-3],[5,6,9],[5,7,8],[1,2,6,-2],[3,4,7,11],[-1,11,10]]\n indList3 = [[9,10,11,-1],[3,4,2,-3],[1,8,2],[1,5,3],[7,11,8,-2],[7,9,5,6],[10,6,4]]\n indList4 = [[7,5,-1,4],[6,3,-3,2],[7,-2,6],[4,1,2],[5,1,3]]\n\n vEnv = ncon.ncon([hamAB,rhoBA,w_isometry,tf.conj(w_isometry),unitary,tf.conj(unitary),tf.conj(v_isometry)],indList1)\n vEnv = vEnv + ncon.ncon([hamBA,rhoBA,w_isometry,tf.conj(w_isometry),unitary,tf.conj(unitary),tf.conj(v_isometry)],indList2)\n vEnv = vEnv + ncon.ncon([hamAB,rhoBA,w_isometry,tf.conj(w_isometry),unitary,tf.conj(unitary),tf.conj(v_isometry)],indList3)\n vEnv = vEnv + ncon.ncon([hamBA,rhoAB,tf.conj(v_isometry),w_isometry,tf.conj(w_isometry)],indList4)\n\n return vEnv\n\n\[email protected]\ndef steady_state_density_matrices(nsteps, rhoAB, rhoBA, w_isometry, v_isometry, unitary, refsym):\n for n in range(nsteps):\n rhoAB_, rhoBA_ = descending_super_operator(rhoAB, rhoBA, w_isometry, v_isometry, unitary,\n refsym)\n rhoAB = 1/2 * (rhoAB_ + tf.conj(tf.transpose(rhoAB_,(2,3,0,1))))/ncon.ncon([rhoAB_],[[1,2,1,2]])\n rhoBA = 1/2 * (rhoBA_ + tf.conj(tf.transpose(rhoBA_,(2,3,0,1))))/ncon.ncon([rhoBA_],[[1,2,1,2]])\n \n return rhoAB, rhoBA\n\n\n\n\n#@tf.contrib.eager.defun #better not defun this function, it takes ages to compile the graph\ndef optimize_mod_binary_mera(hamAB_0, hamBA_0, rhoAB_0, rhoBA_0,\n wC, vC, uC,\n numiter=1000, refsym=False, nsteps_steady_state=4,\n verbose=0, opt_u=True, opt_vw=True, numpy_update_u=True):\n \"\"\"\n ------------------------\n adapted from Glen Evenbly (c) for www.tensors.net, (v1.1) - last modified 24/1/2019\n ------------------------\n optimization of a scale invariant modified binary 
MERA tensor network\n Parameters:\n ----------------------------\n hamAB_0, hamBA_0: tf.Tensor\n bottom-layer Hamiltonians in AB and BA sublattices\n rhoAB_0, rhoBA_0: tf.Tensor \n initial values for steady-state density matrices\n wC, vC, uC: list of tf.Tensor \n isometries (wC, vC) and disentanglers (uC) of the MERA, with \n bottom layers first \n numiter: int \n number of iteration steps \n refsym: bool \n impose reflection symmetry \n nsteps_steady_state: int \n number of power-methodf iteration steps for calculating the \n steady state density matrices \n verbose: int \n verbosity flag \n opt_u, opt_uv: bool \n if False, skip unitary or isometry optimization \n numpy_update_u: bool\n if True, use numpy svd to calculate update of disentanglers\n\n Returns: \n -------------------------------\n (wC, vC, uC, rhoAB, rhoBA, run_times, Energies)\n wC, vC, uC: list of tf.Tensor \n obtimized MERA tensors\n rhoAB, rhoBA: tf.Tensor \n steady state density matrices at the top layer \n run_times: list \n run times per iteration step \n Energies: list \n energies at each iteration step\n \"\"\"\n dtype = rhoAB_0.dtype\n \n\n hamAB = [0 for x in range(len(vC) + 1)]\n hamBA = [0 for x in range(len(vC) + 1)]\n rhoAB = [0 for x in range(len(vC) + 1)]\n rhoBA = [0 for x in range(len(vC) + 1)]\n \n hamAB[0] = hamAB_0\n hamBA[0] = hamBA_0\n \n chi1 = hamAB[0].shape[0]\n \n bias = tf.math.reduce_max(tf.linalg.eigvalsh(tf.reshape(hamAB[0],(chi1 * chi1, chi1 * chi1))))\n hamAB[0] = hamAB[0] - bias * tf.reshape(tf.eye(chi1 * chi1, dtype=dtype), (chi1, chi1, chi1, chi1))\n hamBA[0] = hamBA[0] - bias * tf.reshape(tf.eye(chi1 * chi1, dtype=dtype), (chi1, chi1, chi1, chi1))\n\n Energies = []\n run_times = []\n \n for k in range(numiter):\n t1 = time.time()\n rhoAB[-1], rhoBA[-1] = steady_state_density_matrices(nsteps_steady_state, rhoAB_0, rhoBA_0, wC[-1], vC[-1], uC[-1], refsym) \n for p in range(len(rhoAB)-2,-1,-1):\n rhoAB[p], rhoBA[p] = descending_super_operator(rhoAB[p+1],rhoBA[p+1],wC[p],vC[p],uC[p],refsym)\n\n if verbose > 0:\n if np.mod(k,10) == 1:\n Energies.append((ncon.ncon([rhoAB[0],hamAB[0]],[[1,2,3,4],[1,2,3,4]]) + \n ncon.ncon([rhoBA[0],hamBA[0]],[[1,2,3,4],[1,2,3,4]]))/4 + bias/2)\n stdout.write('\\rIteration: %i of %i: E = %.8f, err = %.16f at D = %i with %i layers' %\n (int(k),int(numiter), float(Energies[-1]), float(Energies[-1] + 4/np.pi,), int(wC[-1].shape[2]), len(wC)))\n stdout.flush()\n \n for p in range(len(wC)):\n if opt_u and (k % opt_u == 0):\n uEnv = get_env_disentangler(hamAB[p],hamBA[p],rhoBA[p+1],wC[p],vC[p],uC[p],refsym)\n if refsym:\n uEnv = uEnv + tf.transpose(uEnv,(1,0,3,2))\n if numpy_update_u:\n uC[p] = misc_mera.u_update_svd_numpy(uEnv)\n else:\n uC[p] = misc_mera.u_update_svd(uEnv)\n \n if opt_vw:\n wEnv = get_env_w_isometry(hamAB[p],hamBA[p],rhoBA[p+1],rhoAB[p+1],wC[p],vC[p],uC[p]) \n wC[p] = misc_mera.w_update_svd(wEnv)\n if refsym:\n vC[p] = wC[p]\n else:\n vEnv = get_env_v_isometry(hamAB[p],hamBA[p],rhoBA[p+1],rhoAB[p+1],wC[p],vC[p],uC[p])\n vC[p] = misc_mera.w_update_svd(vEnv)\n \n hamAB[p+1], hamBA[p+1] = ascending_super_operator(hamAB[p],hamBA[p],wC[p],vC[p],uC[p],refsym)\n \n run_times.append(time.time() - t1)\n if verbose > 2:\n print('time per iteration: ',run_times[-1])\n \n return wC, vC, uC, rhoAB[-1], rhoBA[-1], run_times, Energies\n\n\n\n\ndef increase_bond_dimension_by_adding_layers(chi_new, wC, vC, uC):\n \"\"\"\n increase the bond dimension of the MERA to `chi_new`\n by padding tensors in the last layer with zeros. 
If the desired `chi_new` cannot\n be obtained from padding, adds layers of Tensors\n the last layer is guaranteed to have uniform bond dimension\n\n Parameters:\n --------------------------------\n chi_new: int \n new bond dimenion\n wC, vC, uC: list of tf.Tensor \n MERA isometries and disentanglers\n\n\n Returns: \n --------------------------------\n (wC, vC, uC): list of tf.Tensors\n \"\"\"\n if misc_mera.all_same_chi(wC[-1], vC[-1], uC[-1]) and (wC[-1].shape[2] >= chi_new):\n #nothing to do here\n return wC, vC, uC\n elif misc_mera.all_same_chi(wC[-1], vC[-1], uC[-1]) and (wC[-1].shape[2] < chi_new): \n chi = min(chi_new, wC[-1].shape[0] * wC[-1].shape[1])\n wC[-1] = misc_mera.pad_tensor(wC[-1], [wC[-1].shape[0], wC[-1].shape[1], chi])\n vC[-1] = misc_mera.pad_tensor(vC[-1], [vC[-1].shape[0], vC[-1].shape[1], chi])\n wC_temp = copy.deepcopy(wC[-1])\n vC_temp = copy.deepcopy(vC[-1])\n uC_temp = copy.deepcopy(uC[-1])\n wC.append(misc_mera.pad_tensor(wC_temp, [chi, chi, chi]))\n vC.append(misc_mera.pad_tensor(vC_temp, [chi, chi, chi]))\n uC.append(misc_mera.pad_tensor(uC_temp, [chi, chi, chi, chi]))\n return increase_bond_dimension_by_adding_layers(chi_new, wC, vC, uC) \n\n elif not misc_mera.all_same_chi(wC[-1], vC[-1], uC[-1]):\n raise ValueError('chis of last layer have to be all the same!')\n\n\ndef increase_bond_dimension_by_padding(chi_new, wC, vC, uC):\n \"\"\"\n increase the bond dimension of the MERA to `chi_new`\n by padding tensors in all layers with zeros. If the desired `chi_new` cannot\n be obtained from padding, adds layers of Tensors\n the last layer is guaranteed to have uniform bond dimension\n\n Parameters:\n --------------------------------\n chi_new: int \n new bond dimenion\n wC, vC, uC: list of tf.Tensor \n MERA isometries and disentanglers\n\n\n Returns: \n --------------------------------\n (wC, vC, uC): list of tf.Tensors\n \"\"\"\n\n all_chis = [t.shape[n] for t in wC for n in range(len(t.shape))]\n if not np.all([c <= chi_new for c in all_chis]):\n #nothing to increase\n return wC, vC, uC\n \n chi_0 = wC[0].shape[0]\n wC[0] = misc_mera.pad_tensor(wC[0], [chi_0, chi_0, min(chi_new, chi_0 ** 2)])\n vC[0] = misc_mera.pad_tensor(vC[0], [chi_0, chi_0, min(chi_new, chi_0 ** 2)])\n \n for n in range(1, len(wC)):\n wC[n] = misc_mera.pad_tensor(wC[n], [min(chi_new,chi_0 ** (2 * n)), min(chi_new, chi_0 ** (2 * n)), min(chi_new, chi_0 ** (4 * n))])\n vC[n] = misc_mera.pad_tensor(vC[n], [min(chi_new,chi_0 ** (2 * n)), min(chi_new, chi_0 ** (2 * n)), min(chi_new, chi_0 ** (4 * n))])\n uC[n] = misc_mera.pad_tensor(uC[n], [min(chi_new,chi_0 ** (2 * n)), min(chi_new, chi_0 ** (2 * n)), min(chi_new,chi_0 ** (2 * n)), min(chi_new,chi_0 ** (2 * n))])\n\n n = len(wC)\n while not misc_mera.all_same_chi(wC[-1]):\n wC.append(misc_mera.pad_tensor(wC[-1], [min(chi_new,chi_0 ** (2 * n)), min(chi_new, chi_0 ** (2 * n)), min(chi_new, chi_0 ** (4 * n))]))\n vC.append(misc_mera.pad_tensor(vC[-1], [min(chi_new,chi_0 ** (2 * n)), min(chi_new, chi_0 ** (2 * n)), min(chi_new, chi_0 ** (4 * n))]))\n uC.append(misc_mera.pad_tensor(uC[-1], [min(chi_new,chi_0 ** (2 * n)), min(chi_new, chi_0 ** (2 * n)), min(chi_new,chi_0 ** (2 * n)), min(chi_new,chi_0 ** (2 * n))])) \n n +=1\n\n return wC, vC, uC\n \ndef initialize_TFI_hams(dtype=tf.float64):\n \"\"\"\n initialize a transverse field ising hamiltonian\n\n Returns:\n ------------------\n (hamBA, hamBA)\n tuple of tf.Tensors\n \"\"\"\n sX = np.array([[0, 1], [1, 0]])\n sY = np.array([[0, -1j], [1j, 0]])\n sZ = np.array([[1, 0], [0, -1]])\n\n htemp = 
-np.kron(\n sX, sX) - 0.5 * (np.kron(sZ, np.eye(2)) + np.kron(np.eye(2), sZ))\n hbig = (0.5 * np.kron(np.eye(4), htemp) + np.kron(\n np.eye(2), np.kron(htemp, np.eye(2))) +\n 0.5 * np.kron(htemp, np.eye(4))).reshape(2, 2, 2, 2, 2, 2, 2, 2)\n\n hamAB = tf.Variable(\n (hbig.transpose(0, 1, 3, 2, 4, 5, 7,\n 6).reshape(4, 4, 4, 4)).astype(dtype.as_numpy_dtype),\n use_resource=True,\n name='hamAB_0',\n dtype=dtype)\n hamBA = tf.Variable(\n (hbig.transpose(1, 0, 2, 3, 5, 4, 6,\n 7).reshape(4, 4, 4, 4)).astype(dtype.as_numpy_dtype),\n use_resource=True,\n name='hamBA_0',\n dtype=dtype)\n return hamAB, hamBA\n\n\ndef initialize_mod_binary_MERA(phys_dim,\n chi,\n dtype=tf.float64):\n \n \"\"\"\n Parameters:\n -------------------\n phys_dim: int \n Hilbert space dimension of the bottom layer\n chi: int \n maximum bond dimension\n dtype: tensorflow dtype\n dtype of the MERA tensors\n Returns:\n -------------------\n (wC, vC, uC, rhoAB, rhoBA)\n wC, vC, uC: list of tf.Tensor\n rhoAB, rhoBA: tf.Tensor\n \"\"\"\n \n wC, vC, uC = increase_bond_dimension_by_adding_layers(chi_new=chi,\n wC=[tf.random_uniform(shape=[phys_dim, phys_dim, phys_dim],dtype=dtype)],\n vC=[tf.random_uniform(shape=[phys_dim, phys_dim, phys_dim],dtype=dtype)],\n uC=[tf.random_uniform(shape=[phys_dim, phys_dim, phys_dim, phys_dim],dtype=dtype)])\n chi_top = wC[-1].shape[2]\n rhoAB = tf.reshape(tf.eye(chi_top * chi_top, dtype=dtype),\n (chi_top, chi_top, chi_top, chi_top))\n\n rhoBA = tf.reshape(tf.eye(chi_top * chi_top, dtype=dtype),\n (chi_top, chi_top, chi_top, chi_top))\n \n return wC, vC, uC, rhoAB, rhoBA\n\n\n\ndef run_mod_binary_mera_optimization_TFI(chis=[8, 12, 16], niters=[200, 300, 1000], dtype=tf.float64, verbose=1, refsym=True):\n wC, vC, uC, rhoAB_0, rhoBA_0 = initialize_mod_binary_MERA(phys_dim=4, chi=chis[0],dtype=dtype)\n hamAB_0, hamBA_0 = initialize_TFI_hams(dtype=dtype)\n energies = []\n walltimes = []\n for chi, niter in zip(chis, niters):\n wC, vC, uC = increase_bond_dimension_by_padding(chi,wC, vC, uC)\n rhoAB_0, rhoBA_0 = misc_mera.pad_tensor(rhoAB_0, [chi,chi,chi,chi]), misc_mera.pad_tensor(rhoBA_0, [chi,chi,chi,chi]) \n wC, vC, uC, rhoAB_0, rhoBA_0, times, es = optimize_mod_binary_mera(hamAB_0=hamAB_0, hamBA_0=hamBA_0, rhoAB_0=rhoAB_0, \n rhoBA_0=rhoBA_0, wC=wC,\n vC=vC, uC=uC, verbose = verbose,\n numiter=niter,opt_u=True, opt_vw=True, refsym=refsym)\n energies.extend(es)\n walltimes.extend(times)\n return energies, walltimes, wC, vC, uC\n \n\ndef benchmark_ascending_operator(hab, hba, w, v, u, num_layers):\n t1 = time.time()\n for t in range(num_layers):\n hab, hba = ascending_super_operator(\n hab, hba, w, v, u, refsym=False)\n return time.time() - t1\n\ndef benchmark_descending_operator(rhoab, rhoba, w, v, u, num_layers):\n t1 = time.time()\n for p in range(num_layers):\n rhoab, rhoba = descending_super_operator(rhoab,rhoba,w,v,u, refsym=False)\n return time.time() - t1\n\n\n\ndef run_ascending_operator_benchmark(filename,\n chis=[4, 8, 16, 32],\n num_layers=8,\n dtype=tf.float64,\n device=None):\n walltimes = {'warmup': {}, 'profile': {}}\n for chi in chis:\n print('running ascending-operator benchmark for chi = {0} benchmark'.\n format(chi))\n with tf.device(device):\n wC, vC, uC, rhoAB, rhoBA = initialize_mod_binary_MERA(phys_dim=4, chi=chi, dtype=dtype)\n shape = uC[-1].shape\n hab, hba = tf.random_uniform(shape = shape, dtype=dtype),tf.random_uniform(shape = shape, dtype=dtype)\n walltimes['warmup'][chi] = benchmark_ascending_operator(\n hab, hba, wC[-1], vC[-1], uC[-1], num_layers)\n print(' warmup 
took {0} s'.format(walltimes['warmup'][chi]))\n walltimes['profile'][chi] = benchmark_ascending_operator(\n hab, hba, wC[-1], vC[-1], uC[-1], num_layers) \n print(' profile took {0} s'.format(walltimes['profile'][chi]))\n\n with open(filename + '.pickle', 'wb') as f:\n pickle.dump(walltimes, f)\n return walltimes\n\ndef run_descending_operator_benchmark(filename,\n chis=[4, 8, 16, 32],\n num_layers=8,\n dtype=tf.float64,\n device=None):\n walltimes = {'warmup': {}, 'profile': {}}\n for chi in chis:\n print('running descending-operator benchmark for chi = {0} benchmark'.\n format(chi))\n with tf.device(device):\n wC, vC, uC, rhoAB, rhoBA = initialize_mod_binary_MERA(phys_dim=4, chi=chi, dtype=dtype)\n shape = uC[-1].shape \n hab, hba = tf.random_uniform(shape = shape, dtype=dtype),tf.random_uniform(shape = shape, dtype=dtype) \n walltimes['warmup'][chi] = benchmark_descending_operator(\n rhoAB, rhoBA, wC[-1], vC[-1], uC[-1], num_layers = num_layers)\n print(' warmup took {0} s'.format(walltimes['warmup'][chi]))\n walltimes['profile'][chi] = benchmark_descending_operator(\n rhoAB, rhoBA, wC[-1], vC[-1], uC[-1], num_layers = num_layers)\n print(' profile took {0} s'.format(walltimes['profile'][chi]))\n\n with open(filename + '.pickle', 'wb') as f:\n pickle.dump(walltimes, f)\n return walltimes\n\ndef run_optimization_naive_benchmark(filename,\n chis=[4, 8, 16, 32],\n dtype=tf.float64,\n numiter=30,\n device=None, opt_u=True, opt_vw=True, np_update=True, refsym=True):\n \n walltimes = {'profile': {}, 'energies' : {}} \n with tf.device(device): \n for chi in chis:\n print('running naive optimization benchmark for chi = {0}'.\n format(chi))\n\n wC, vC, uC, rhoAB_0, rhoBA_0 = initialize_mod_binary_MERA(phys_dim=4, chi=chi, dtype=dtype)\n hamAB_0, hamBA_0 = initialize_TFI_hams(dtype=dtype)\n wC, vC, uC, rhoAB_0, rhoBA_0, runtimes, energies = optimize_mod_binary_mera(hamAB_0=hamAB_0, hamBA_0=hamBA_0, rhoAB_0=rhoAB_0, \n rhoBA_0=rhoBA_0, wC=wC,\n vC=vC, uC=uC, verbose=1,\n numiter=numiter,opt_u=True, opt_vw=True, refsym=refsym)\n walltimes['profile'][chi] = runtimes\n walltimes['energies'][chi] = energies\n print(' steps took {0} s'.format(walltimes['profile'][chi]))\n with open(filename + '.pickle', 'wb') as f:\n pickle.dump(walltimes, f)\n\n return walltimes\n \n\ndef run_optimization_benchmark(filename,\n chis=[4, 8, 16, 32],\n numiters=[200, 200, 400, 800],\n dtype=tf.float64,\n device=None, \n refsym=True, verbose=1):\n \n walltimes = {}\n with tf.device(device): \n print('running optimization benchmark')\n energies, runtimes, wC, vC, uC = run_mod_binary_mera_optimization_TFI(chis=chis, niters=numiters, dtype=dtype, verbose=verbose, refsym=refsym)\n walltimes['profile'] = runtimes\n walltimes['energies'] = energies\n with open(filename + '.pickle', 'wb') as f:\n pickle.dump(walltimes, f)\n\n return walltimes\n \n\nif __name__ == \"__main__\":\n if not tf.executing_eagerly():\n pass\n\n else:\n #uncomment to perform benchmarks\n benchmarks = {'descend' : {'chis' : [8, 10, 12],\n 'dtype' : tf.float32,\n 'num_layers' : 8}}\n # benchmarks = {'ascend' : {'chis' : [8, 10, 12],\n # 'dtype' : tf.float32,\n # 'num_layers' : 8}}\n\n # benchmarks = {'ascend' : {'chis' : [16, 32, 40, 48, 54],\n # 'dtype' : tf.float32,\n # 'num_layers' : 8},\n # 'descend' : {'chis' : [16, 32, 40, 48, 54],\n # 'dtype' : tf.float32,\n # 'num_layers' : 8}}\n \n # benchmarks = {'optimize_naive' : {'chis' : [8,12],\n # 'dtype' : tf.float64,\n # 'opts_u' : [True, True],\n # 'opts_vw' : [True, True],\n # 'np_update' : True,\n # 
'refsym' : True,\n # 'numiter' : 5}}\n # benchmarks = {'optimize' : {'chis' : [8, 10, 12, 16],\n # 'numiters' : [400, 400, 800, 1000], \n # 'dtype' : tf.float64,\n # 'refsym' : True}}\n\n \n use_gpu = False\n DEVICES = tf.contrib.eager.list_devices()\n print(\"Available devices:\")\n for i, device in enumerate(DEVICES):\n print(\"%d) %s\" % (i, device))\n CPU = '/device:CPU:0'\n GPU = '/job:localhost/replica:0/task:0/device:GPU:0'\n if use_gpu:\n specified_device_type = GPU\n name = 'GPU'\n else:\n specified_device_type = CPU\n name = 'CPU'\n\n\n if 'ascend' in benchmarks:\n num_layers = benchmarks['ascend']['num_layers']\n dtype = benchmarks['ascend']['dtype']\n chis = benchmarks['ascend']['chis']\n chis_str = '{0}'.format(chis).replace(' ','')\n fname = 'ascending_benchmarks'\n if not os.path.exists(fname):\n os.mkdir(fname)\n os.chdir(fname)\n run_ascending_operator_benchmark(\n name +\n 'modified_binary_mera_ascending_benchmark_chi{0}_numlayers{1}_dtype{2}'.\n format(chis_str, num_layers, dtype.name),\n chis=chis,\n num_layers=num_layers,\n device=specified_device_type)\n \n if 'descend' in benchmarks:\n num_layers = benchmarks['descend']['num_layers']\n dtype = benchmarks['descend']['dtype']\n chis = benchmarks['descend']['chis']\n chis_str = '{0}'.format(chis).replace(' ','')\n fname = 'descending_benchmarks'\n if not os.path.exists(fname):\n os.mkdir(fname)\n os.chdir(fname)\n run_descending_operator_benchmark(\n name +\n 'modified_binary_mera_descending_benchmark_chi{0}_numlayers{1}_dtype{2}'.\n format(chis_str, num_layers, dtype.name),\n chis=chis,\n num_layers=num_layers,\n device=specified_device_type)\n \n if 'optimize_naive' in benchmarks:\n dtype = benchmarks['optimize_naive']['dtype']\n chis = benchmarks['optimize_naive']['chis']\n numiter = benchmarks['optimize_naive']['numiter']\n opts_u = benchmarks['optimize_naive']['opts_u']\n opts_vw = benchmarks['optimize_naive']['opts_vw']\n np_update = benchmarks['optimize_naive']['np_update']\n refsym = benchmarks['optimize_naive']['refsym'] \n chis_str = '{0}'.format(chis).replace(' ','')\n fname = 'benchmarks_optimize_naive'\n if not os.path.exists(fname):\n os.mkdir(fname)\n os.chdir(fname)\n \n \n for opt_u, opt_vw in zip(opts_u, opts_vw):\n run_optimization_naive_benchmark(\n name +\n 'modified_binary_mera_optimization_benchmark_Nthreads{6}_chi{0}_dtype{1}_opt_u{2}_opt_vw{3}_numiter{4}_npupdate{5}'.\n format(chis_str, dtype.name, opt_u, opt_vw, numiter, np_update, NUM_THREADS),\n chis=chis,\n numiter=numiter,\n device=specified_device_type,\n opt_u=opt_u,\n dtype=dtype,\n opt_vw=opt_vw,\n np_update=np_update,\n refsym=refsym)\n\n\n if 'optimize' in benchmarks:\n dtype = benchmarks['optimize']['dtype']\n chis = benchmarks['optimize']['chis']\n numiters = benchmarks['optimize']['numiters']\n refsym = benchmarks['optimize']['refsym'] \n chis_str = '{0}'.format(chis).replace(' ','')\n numiters_str = '{0}'.format(numiters).replace(' ','') \n fname = 'benchmarks_optimize'\n if not os.path.exists(fname):\n os.mkdir(fname)\n os.chdir(fname)\n filename = name + 'modified_binary_mera_optimization_benchmark_Nthreads{0}_chis{1}_dtype{2}_numiters{3}'.format(NUM_THREADS, chis_str,\n dtype.name, numiters_str)\n \n run_optimization_benchmark(filename,\n chis=chis,\n numiters=numiters,\n dtype=dtype,\n device=specified_device_type, \n refsym=True, verbose=1)\n\n" }, { "alpha_fraction": 0.6968838572502136, "alphanum_fraction": 0.7157695889472961, "avg_line_length": 32.09375, "blob_id": "115a947d3ae91279b7c89a5a6158f330390f47ac", "content_id": 
"f4b0925495f116140fef572cceb61d57728aec94", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1059, "license_type": "permissive", "max_line_length": 74, "num_lines": 32, "path": "/tensornetwork/ncon_interface_test.py", "repo_name": "kshithijiyer/TensorNetwork", "src_encoding": "UTF-8", "text": "# Copyright 2019 The TensorNetwork Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport tensorflow as tf\ntf.enable_v2_behavior()\nfrom tensornetwork import ncon_interface\n\n\nclass NconTest(tf.test.TestCase):\n\n def test_ncon_sanity_check(self):\n result = ncon_interface.ncon(\n [tf.ones((2, 2)), tf.ones((2, 2))], [(-1, 0), (0, -2)])\n self.assertAllClose(result, tf.ones((2, 2)) * 2)\n\n\nif __name__ == '__main__':\n tf.test.main()\n" }, { "alpha_fraction": 0.6919159889221191, "alphanum_fraction": 0.7116486430168152, "avg_line_length": 33.15217208862305, "blob_id": "cdf3efbdaf4464577ffa0f93a5bbfbb4727655ad", "content_id": "f7ef166a1240f06a00374b4716caa29490b5e2cc", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3142, "license_type": "permissive", "max_line_length": 123, "num_lines": 92, "path": "/README.md", "repo_name": "kshithijiyer/TensorNetwork", "src_encoding": "UTF-8", "text": "# TensorNetwork\n[![Build Status](https://travis-ci.org/google/TensorNetwork.svg?branch=master)](https://travis-ci.org/google/TensorNetwork)\n\n\nA tensor network wrapper for TensorFlow.\n## Installation\n```\npip3 install tensornetwork\n```\n\nNote: The following examples assume a TensorFlow v2 interface \n(in TF 1.13 or higher, run `tf.enable_v2_behavior()` after \nimporting tensorflow) but should also work with eager mode \n(`tf.enable_eager_execution()`). The actual library does work \nunder graph mode, but documentation is limited.\n\n## Basic Example\nHere, we build a simple 2 node contraction.\n```python\nimport numpy as np\nimport tensorflow as tf\ntf.enable_v2_behavior()\nimport tensornetwork\n\n# Create the network\nnet = tensornetwork.TensorNetwork()\n# Add the nodes\na = net.add_node(np.ones((10,), dtype=np.float32)) \n# Can use either np.array or tf.Tensor and can even mix them!\nb = net.add_node(tf.ones((10,)))\nedge = net.connect(a[0], b[0])\nfinal_node = net.contract(edge)\nprint(final_node.get_tensor().numpy()) # Should print 10.0\n```\n\n## Node and Edge names.\nYou can optionally name your nodes/edges. 
This can be useful for debugging, \nas all error messages will print the name of the broken edge/node.\n```python\nnet = tensornetwork.TensorNetwork()\nnode = net.add_node(np.eye(2), name=\"Identity Matrix\")\nprint(\"Name of node: {}\".format(node.name))\nedge = net.connect(node[0], node[1], name=\"Trace Edge\")\nprint(\"Name of the edge: {}\".format(edge.name))\n# Adding name to a contraction will add the name to the new edge created.\nfinal_result = net.contract(edge, name=\"Trace Of Identity\")\nprint(\"Name of new node after contraction: {}\".format(final_result.name))\n```\n## Named axes.\nTo make remembering what an axis does easier, you can optionally name a node's axes.\n```python\nnet = tensornetwork.TensorNetwork()\na = net.add_node(np.zeros((2, 2)), axis_names=[\"alpha\", \"beta\"])\nedge = net.connect(a[\"beta\"], a[\"alpha\"])\n```\n\n## Edge reordering.\nTo assert that your result's axes are in the correct order, you can reorder a node at any time during computation.\n```python\nnet = tensornetwork.TensorNetwork()\na = net.add_node(np.zeros((1, 2, 3)))\ne1 = a[0]\ne2 = a[1]\ne3 = a[2]\na.reorder_edges([e3, e1, e2])\n# If you already know the axis values, you can equivalently do\n# a.reorder_axes([2, 0, 1])\nprint(a.tensor.shape) # Should print (3, 1, 2)\n```\n\n## NCON interface.\nFor a more compact specification of a tensor network and its contraction, there is `ncon()`. For example:\n```python\nfrom tensornetwork import ncon\na = tf.random_normal((2,2))\nb = tf.random_normal((2,2))\nc = ncon([a,b], [(-1,0),(0,-2)])\nprint(tf.norm(tf.matmul(a,b) - c)) # Should be zero\n```\nIt is also possible to generate a `TensorNetwork`:\n```python\nfrom tensornetwork import ncon_network\na = tf.random_normal((2,2))\nb = tf.random_normal((2,2))\nnet, e_con, e_out = ncon_network([a,b], [(-1,0),(0,-2)])\nfor e in e_con:\n n = net.contract(e) # Contract edges in order\nn.reorder_edges(e_out) # Permute final tensor as necessary\nprint(tf.norm(tf.matmul(a,b) - n.get_tensor()))\n```\n\nTensorNetwork is not an official Google product. 
Copyright 2019 The TensorNetwork Developers.\n" }, { "alpha_fraction": 0.8441558480262756, "alphanum_fraction": 0.8441558480262756, "avg_line_length": 50, "blob_id": "208af02042fd3a5e733cfccd634100b344e25aa9", "content_id": "6028847d720b2e7aa6ab9df808ecdd14085eaea9", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 154, "license_type": "permissive", "max_line_length": 59, "num_lines": 3, "path": "/tensornetwork/__init__.py", "repo_name": "kshithijiyer/TensorNetwork", "src_encoding": "UTF-8", "text": "from __future__ import absolute_import\nfrom tensornetwork.tensornetwork import TensorNetwork\nfrom tensornetwork.ncon_interface import ncon, ncon_network\n\n" }, { "alpha_fraction": 0.71875, "alphanum_fraction": 0.7307692170143127, "avg_line_length": 32.72972869873047, "blob_id": "857e0be987dbcdd98a12beb98eec7798dce14c9f", "content_id": "1ec2a5b8f3202c1d6f2df5efd000ecbc902a49ec", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1248, "license_type": "permissive", "max_line_length": 74, "num_lines": 37, "path": "/setup.py", "repo_name": "kshithijiyer/TensorNetwork", "src_encoding": "UTF-8", "text": "# Copyright 2019 The TensorNetwork Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nfrom setuptools import find_packages, setup\n\n# This reads the __version__ variable from cirq/_version.py\n__version__ = ''\ndescription = ('A high level tensor network API for tensorflow.')\n\n# Read in requirements\nrequirements = open('requirements.txt').readlines()\nrequirements = [r.strip() for r in requirements]\n\nsetup(\n name='tensornetwork',\n version='0.0.1',\n url='http://github.com/google/TensorNetwork',\n author='The TensorNetwork Developers',\n author_email='[email protected]',\n python_requires=('>=3.5.0'),\n install_requires=requirements,\n license='Apache 2',\n description=description,\n packages=['tensornetwork'],\n)\n" } ]
7
giorgiobasile/resync-py-server
https://github.com/giorgiobasile/resync-py-server
17106599115904dde19b59397e902fe4f383956e
b725cb165cb60fad8de6b78cd4b46919e16c0cf2
55b536c39817758f0b70dfd45ac5ff452d07e228
refs/heads/master
2023-07-23T20:40:11.252682
2017-01-24T19:50:07
2017-01-24T19:51:09
79,399,174
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6557203531265259, "alphanum_fraction": 0.6578390002250671, "avg_line_length": 24.849315643310547, "blob_id": "10f7dd6a462d3e8367a0f34a63a4971c5c9f1e18", "content_id": "94235cb84281c7734806e6b517175d1993cefefb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1888, "license_type": "no_license", "max_line_length": 71, "num_lines": 73, "path": "/resyncserver/elastic/elastic_generator.py", "repo_name": "giorgiobasile/resync-py-server", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport optparse\nimport os\nimport time\n\nimport yaml\nfrom rspub.core.rs_enum import Strategy\n\nfrom resyncserver.elastic.elastic_rs import ElasticResourceSync\nfrom resyncserver.elastic.elastic_rs_paras import ElasticRsParameters\n\nRESOURCE_TYPE = \"resource\"\n\n\nclass ElasticGenerator(object):\n\n def __init__(self, config):\n self.config = config\n\n def generate(self):\n rs = ElasticResourceSync(**self.config.__dict__)\n rs.execute()\n return 0\n\n def generate_resourcelist(self):\n self.config.strategy = Strategy.resourcelist.value\n return self.generate()\n\n def generate_new_changelist(self):\n self.config.strategy = Strategy.new_changelist.value\n return self.generate()\n\n\ndef main():\n parser = optparse.OptionParser()\n parser.add_option('--config-file', '-c',\n help=\"the source configuration file\")\n\n # Parse command line arguments\n (args, clargs) = parser.parse_args()\n\n if len(clargs) > 0:\n parser.print_help()\n return\n if args.config_file is None:\n parser.print_help()\n return\n\n config = yaml.load(open(args.config_file, 'r'))['executor']\n\n if not os.path.exists(config['description_dir']):\n os.makedirs(config['description_dir'])\n\n rs_params = ElasticRsParameters(**config)\n start = time.clock()\n\n gener = ElasticGenerator(rs_params)\n gener.generate_resourcelist()\n\n elapsed_time = time.clock() - start\n print(\"Elapsed time:\", elapsed_time)\n print(\"Published simple resourcelist at\", rs_params.last_execution)\n\n # todo\n # ch_params = RsParameters(**params)\n # gener = ElasticGenerator(rs_params, INDEX, RESOURCE_TYPE)\n # gener.generate_new_changelist()\n # print(\"Published capabilitylist at\", ch_params.last_execution)\n\n\nif __name__ == '__main__':\n main()\n\n" }, { "alpha_fraction": 0.7058823704719543, "alphanum_fraction": 0.7058823704719543, "avg_line_length": 33, "blob_id": "9ac47120c3fc62ec1c103a599426211a258d256b", "content_id": "f506d3de17cb38d9409edf300c3fc7ff1099c61c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 34, "license_type": "no_license", "max_line_length": 33, "num_lines": 1, "path": "/resyncserver/__init__.py", "repo_name": "giorgiobasile/resync-py-server", "src_encoding": "UTF-8", "text": "\"\"\"ResourceSync server module.\"\"\"\n" }, { "alpha_fraction": 0.588652491569519, "alphanum_fraction": 0.5963894128799438, "avg_line_length": 31.08965492248535, "blob_id": "f2cd5fca899a70b6653fb54e57cdba90666da119", "content_id": "de08584402e2d27cfef058e82a4e3ec5399c7719", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4653, "license_type": "no_license", "max_line_length": 97, "num_lines": 145, "path": "/resyncserver/http_interface.py", "repo_name": "giorgiobasile/resync-py-server", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nhttp_interface.py: The source's HTTP Web interface.\n\nRuns on 
the non-blocking Tornado web server (http://www.tornadoweb.org/)\n\nCreated by Bernhard Haslhofer on 2012-04-24.\nEdited by Giorgio Basile on 2016-12-12.\n\"\"\"\nimport mimetypes\nimport threading\nimport os.path\nimport logging\nfrom datetime import datetime\n\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.web\n\nfrom resyncserver.source import Source\n\n\nclass HTTPInterface(object):\n \"\"\"The repository's HTTP interface.\n\n To make sure it doesn't interrupt\n the simulation, it runs in a separate thread.\n\n http://stackoverflow.com/questions/323972/\n is-there-any-way-to-kill-a-thread-in-python (Stoppable Threads)\n\n http://www.slideshare.net/juokaz/\n restful-web-services-with-python-dynamic-languages-conference\n \"\"\"\n\n STATIC_FILE_PATH = os.path.join(os.path.dirname(__file__), \"static\")\n\n def __init__(self, source):\n \"\"\"Initialize HTTP interface with default settings and handlers.\"\"\"\n super(HTTPInterface, self).__init__()\n self.logger = logging.getLogger('http')\n self.source = source\n self._stop = threading.Event()\n self.port = source.port\n self.settings = dict(\n title=u\"ResourceSync Server\",\n template_path=os.path.join(os.path.dirname(__file__), \"templates\"),\n static_path=self.STATIC_FILE_PATH,\n source_path=source.config['source_path'],\n autoescape=None,\n )\n self.handlers = [\n (r\"/\", HomeHandler, dict(source=self.source)),\n (r\"/(.*)\", ResourceHandler,\n dict(source=self.source)),\n (r\"/(favicon\\.ico)\", tornado.web.StaticFileHandler,\n dict(path=self.settings['static_path'])),\n ]\n\n def run(self):\n \"\"\"Run server.\"\"\"\n\n self.logger.info(\"Starting up HTTP Interface on port %i\" % (self.port))\n application = tornado.web.Application(\n handlers=self.handlers,\n debug=True,\n **self.settings)\n self.http_server = tornado.httpserver.HTTPServer(application)\n self.http_server.listen(self.port)\n tornado.ioloop.IOLoop.instance().start()\n\n def stop(self):\n \"\"\"Stop server.\"\"\"\n self.logger.info(\"Stopping HTTP Interface\")\n tornado.ioloop.IOLoop.instance().stop()\n self._stop.set()\n\n def stopped(self):\n \"\"\"True if server is stopped.\"\"\"\n return self._stop.isSet()\n\n\nclass BaseRequestHandler(tornado.web.RequestHandler):\n \"\"\"Handler for source.\"\"\"\n\n SUPPORTED_METHODS = (\"GET\")\n\n def initialize(self, source):\n \"\"\"Initialize with supplied source.\"\"\"\n self.source = source\n\n\nclass HomeHandler(BaseRequestHandler):\n \"\"\"Root URI handler.\"\"\"\n\n def get(self):\n \"\"\"Implement GET for homepage.\"\"\"\n print(\"Received request at: \" + self.request.path)\n self.render(\"home.html\",\n resource_count=self.source.resource_count,\n source=self.source)\n\n\nclass ResourcesHandler(BaseRequestHandler):\n \"\"\"Resources subset selection handler.\"\"\"\n\n def get(self):\n \"\"\"Implement GET for resources.\"\"\"\n rand_res = sorted(self.source.random_resources(100),\n key=lambda res: res.uri)\n self.render(\"resource.index.html\",\n resources=rand_res,\n source=self.source)\n\n\nclass ResourceHandler(BaseRequestHandler):\n \"\"\"Resource handler.\"\"\"\n\n def get(self, base_url: str):\n print(\"Received request at: \" + base_url)\n \"\"\"Implement GET for resource.\"\"\"\n file_path = self.settings[\"source_path\"] + \"/\" + base_url\n\n if not os.path.isfile(file_path):\n self.send_error(404)\n else:\n #payload = open(file_path, \"r\", encoding=\"utf-8\").read()\n\n if file_path.endswith(\".well-known/resourcesync\"):\n self.set_header(\"Content-Type\", \"application/xml\")\n 
else:\n (type, enc) = mimetypes.guess_type(file_path)\n self.set_header(\"Content-Type\", type)\n if enc is not None:\n self.set_header(\"Content-Encoding\", enc)\n\n self.set_header(\"Content-Length\", os.path.getsize(file_path))\n self.set_header(\"Last-Modified\", datetime.fromtimestamp(os.path.getmtime(file_path)))\n with open(file_path, 'rb') as f:\n while 1:\n data = f.read(16384) # or some other nice-sized chunk\n if not data: break\n self.write(data)\n self.finish()\n" }, { "alpha_fraction": 0.5440309047698975, "alphanum_fraction": 0.5480984449386597, "avg_line_length": 40.31932830810547, "blob_id": "cd326034bd02b2fda57d629dfca3813dae5cc77c", "content_id": "f25387c696410dc05228e9ff7f9f51520d414600", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9834, "license_type": "no_license", "max_line_length": 132, "num_lines": 238, "path": "/resyncserver/elastic/exe_elastic_resourcelist.py", "repo_name": "giorgiobasile/resync-py-server", "src_encoding": "UTF-8", "text": "import os\n\nfrom elasticsearch import Elasticsearch\nfrom resync import Resource\nfrom resync import ResourceList\nfrom rspub.core.executors import Executor, SitemapData, ExecutorEvent\nfrom rspub.core.rs_enum import Capability\nfrom rspub.util import defaults\n\n\nMAX_RESULT_WINDOW = 10000\n\n\nclass ElasticResourceListExecutor(Executor):\n def __init__(self, rs_parameters):\n super(ElasticResourceListExecutor, self).__init__(rs_parameters)\n\n\n def execute(self, filenames=None):\n # filenames is not necessary, we use it only to match the method signature\n self.date_start_processing = defaults.w3c_now()\n self.observers_inform(self, ExecutorEvent.execution_start, date_start_processing=self.date_start_processing)\n if not os.path.exists(self.para.abs_metadata_dir()):\n os.makedirs(self.para.abs_metadata_dir())\n\n self.prepare_metadata_dir()\n sitemap_data_iter = self.generate_rs_documents()\n self.post_process_documents(sitemap_data_iter)\n self.date_end_processing = defaults.w3c_now()\n self.create_index(sitemap_data_iter)\n\n capabilitylist_data = self.create_capabilitylist()\n self.update_resource_sync(capabilitylist_data)\n\n self.observers_inform(self, ExecutorEvent.execution_end, date_end_processing=self.date_end_processing,\n new_sitemaps=sitemap_data_iter)\n\n def prepare_metadata_dir(self):\n if self.para.is_saving_sitemaps:\n self.clear_metadata_dir()\n\n def generate_rs_documents(self, filenames: iter = None) -> [SitemapData]:\n # filenames is not necessary, we use it only to match the method signature\n sitemap_data_iter = []\n generator = self.resourcelist_generator()\n for sitemap_data, sitemap in generator():\n sitemap_data_iter.append(sitemap_data)\n\n return sitemap_data_iter\n\n def create_index(self, sitemap_data_iter: iter):\n if len(sitemap_data_iter) > 1:\n resourcelist_index = ResourceList()\n resourcelist_index.sitemapindex = True\n resourcelist_index.md_at = self.date_start_processing\n resourcelist_index.md_completed = self.date_end_processing\n index_path = self.para.abs_metadata_path(\"resourcelist-index.xml\")\n rel_index_path = os.path.relpath(index_path, self.para.resource_dir)\n index_url = self.para.url_prefix + defaults.sanitize_url_path(rel_index_path)\n resourcelist_index.link_set(rel=\"up\", href=self.para.capabilitylist_url())\n\n for sitemap_data in sitemap_data_iter:\n resourcelist_index.add(Resource(uri=sitemap_data.uri, md_at=sitemap_data.doc_start,\n md_completed=sitemap_data.doc_end))\n if sitemap_data.document_saved:\n 
self.update_rel_index(index_url, sitemap_data.path)\n\n self.finish_sitemap(-1, resourcelist_index)\n\n def resourcelist_generator(self) -> iter:\n\n def generator() -> [SitemapData, ResourceList]:\n resourcelist = None\n ordinal = self.find_ordinal(Capability.resourcelist.name)\n resource_count = 0\n doc_start = None\n resource_generator = self.resource_generator()\n for resource_count, resource in resource_generator():\n # stuff resource into resourcelist\n if resourcelist is None:\n resourcelist = ResourceList()\n doc_start = defaults.w3c_now()\n resourcelist.md_at = doc_start\n resourcelist.add(resource)\n\n # under conditions: yield the current resourcelist\n if resource_count % self.para.max_items_in_list == 0:\n ordinal += 1\n doc_end = defaults.w3c_now()\n resourcelist.md_completed = doc_end\n print(\"Generating resourcelist #:\" + str(ordinal))\n sitemap_data = self.finish_sitemap(ordinal, resourcelist, doc_start=doc_start, doc_end=doc_end)\n print(\"Finish\")\n yield sitemap_data, resourcelist\n resourcelist = None\n\n # under conditions: yield the current and last resourcelist\n if resourcelist:\n ordinal += 1\n doc_end = defaults.w3c_now()\n resourcelist.md_completed = doc_end\n #if ordinal == 0:\n # if we have a single doc, set ordinal to -1 so that the finish_sitemap will not append the\n # ordinal to the filename\n #ordinal = -1\n #print(\"Generating resourcelist\")\n #else:\n print(\"Generating resourcelist #:\" + str(ordinal))\n sitemap_data = self.finish_sitemap(ordinal, resourcelist, doc_start=doc_start, doc_end=doc_end)\n print(\"Finish\")\n yield sitemap_data, resourcelist\n\n return generator\n\n def resource_generator(self) -> iter:\n\n def generator(count=0) -> [int, Resource]:\n elastic_page_generator = self.elastic_page_generator()\n for e_page in elastic_page_generator():\n for e_hit in e_page:\n e_source = e_hit['_source']\n e_doc = ElasticResourceDoc(e_hit['_id'], e_source['filename'], e_source['size'], e_source['md5'],\n e_source['mime'], e_source['time'], e_source['publisher'], e_source['res_type'])\n filename = e_doc.filename\n file = os.path.abspath(filename)\n count += 1\n path = os.path.relpath(file, self.para.resource_dir)\n uri = self.para.url_prefix + defaults.sanitize_url_path(path)\n resource = Resource(uri=uri, length=e_doc.size,\n lastmod=e_doc.time,\n md5=e_doc.md5,\n mime_type=e_doc.mime)\n yield count, resource\n self.observers_inform(self, ExecutorEvent.created_resource, resource=resource,\n count=count, file=file)\n\n return generator\n\n def elastic_page_generator(self) -> iter:\n\n def generator() -> iter:\n es = Elasticsearch([{\"host\": self.para.elastic_host, \"port\": self.para.elastic_port}])\n result_size = self.para.max_items_in_list\n c_iter = 0\n n_iter = 1\n # index.max_result_window in Elasticsearch controls the max number of results returned from a query.\n # we can either increase it to 50k in order to match the sitemaps pagination requirements or not\n # in the latter case, we have to bulk the number of items that we want to put into each resourcelist chunk\n if self.para.max_items_in_list > MAX_RESULT_WINDOW:\n n = self.para.max_items_in_list / MAX_RESULT_WINDOW\n n_iter = int(n)\n result_size = MAX_RESULT_WINDOW\n\n query = {\"query\":\n {\"bool\":\n {\"must\":[\n {\"term\":\n {\"publisher\": self.para.publisher_name}\n },\n\n {\"term\":\n {\"res_type\": self.para.res_type}\n }]\n }\n }\n }\n\n page = es.search(index=self.para.elastic_index, doc_type=self.para.elastic_resource_type, scroll='2m', size=result_size,\n 
body=query)\n sid = page['_scroll_id']\n # total_size = page['hits']['total']\n scroll_size = len(page['hits']['hits'])\n bulk = page['hits']['hits']\n c_iter += 1\n # if c_iter and n_iter control the number of iteration we need to perform in order to yield a bulk of\n # (at most) self.para.max_items_in_list\n if c_iter >= n_iter or scroll_size < result_size:\n c_iter = 0\n yield bulk\n bulk = []\n while scroll_size > 0:\n page = es.scroll(scroll_id=sid, scroll='2m')\n # Update the scroll ID\n sid = page['_scroll_id']\n # Get the number of results that we returned in the last scroll\n scroll_size = len(page['hits']['hits'])\n bulk.extend(page['hits']['hits'])\n c_iter += 1\n if c_iter >= n_iter or scroll_size < result_size:\n c_iter = 0\n yield bulk\n bulk = []\n\n return generator\n\n\nclass ElasticResourceDoc(object):\n def __init__(self, elastic_id, filename, size, md5, mime, time, publisher, res_type):\n self._elastic_id = elastic_id\n self._filename = filename\n self._size = size\n self._md5 = md5\n self._mime = mime\n self._time = time\n self._publisher = publisher\n self._res_type = res_type\n\n @property\n def elastic_id(self):\n return self.elastic_id\n\n @property\n def filename(self):\n return self._filename\n\n @property\n def size(self):\n return self._size\n\n @property\n def md5(self):\n return self._md5\n\n @property\n def mime(self):\n return self._mime\n\n @property\n def time(self):\n return self._time\n\n @property\n def publisher(self):\n return self._publisher\n\n @property\n def res_type(self):\n return self._res_type\n" }, { "alpha_fraction": 0.5805506706237793, "alphanum_fraction": 0.5946104526519775, "avg_line_length": 38.72093200683594, "blob_id": "bddf8ab19ab6d44fbfd28503eacd75d1f2224e9d", "content_id": "b1811146566b902babfa27c5e1a5eed344fc7427", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1707, "license_type": "no_license", "max_line_length": 158, "num_lines": 43, "path": "/setup.py", "repo_name": "giorgiobasile/resync-py-server", "src_encoding": "UTF-8", "text": "from setuptools import setup\n\nimport re\nVERSIONFILE=\"resyncserver/_version.py\"\nverfilestr = open(VERSIONFILE, \"rt\").read()\nmatch = re.search(r\"^__version__ = '(\\d\\.\\d.\\d+(\\.\\d+)?)'\", verfilestr, re.MULTILINE)\nif match:\n version = match.group(1)\nelse:\n raise RuntimeError(\"Unable to find version string in %s.\" % (VERSIONFILE))\n\nsetup(\n name='resync-omtd-demo',\n version=version,\n packages=['resyncserver'],\n package_data={'resyncserver': ['static/*','templates/*']},\n scripts=['resync-server.py'],\n classifiers=[\"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Environment :: Web Environment\"],\n license='Creative Commons Attribution-Noncommercial-Share Alike license',\n author='Giorgio Basile',\n author_email='[email protected]',\n description='ResourceSync generator and server demo',\n long_description=open('README.md').read(),\n install_requires=[\n \"tornado>=4.4.2\",\n \"pyyaml\",\n \"watchdog>=0.8.3\",\n 'logutils',\n \"elasticsearch>=1.0.0,<2.0.0\"\n ],\n dependency_links=[\"https://github.com/EHRI/rspub-core/tarball/master#egg=rspub-core\", 
\"https://github.com/EHRI/resync/tarball/ehribranch#egg=resyncehri\"],\n test_suite=\"resyncserver.test\",\n)" }, { "alpha_fraction": 0.6386371850967407, "alphanum_fraction": 0.641355574131012, "avg_line_length": 31.447059631347656, "blob_id": "6bc228bcd7744a6a171726892f994534e0d88e8e", "content_id": "303a99cf4bef830d8c5a22da61991c83dbd70571", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5518, "license_type": "no_license", "max_line_length": 112, "num_lines": 170, "path": "/resyncserver/source.py", "repo_name": "giorgiobasile/resync-py-server", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nsource.py: A source holds a set of resources and changes over time.\n\nCreated by Giorgio Basile on 09-01-2017.\n\"\"\"\n\nimport logging\nimport os\nimport pprint\nimport random\nimport time\nfrom concurrent.futures import ThreadPoolExecutor\n\nfrom tornado import gen\n\nfrom resyncserver.elastic.elastic_generator import ElasticGenerator\nfrom resyncserver.elastic.elastic_rs_paras import ElasticRsParameters\nfrom resyncserver.observer import Observable\n\n\nclass DynamicResourceListBuilder(object):\n \"\"\"Generates an resource_list snapshot from a source.\"\"\"\n\n def __init__(self, source):\n \"\"\"Initialize the DynamicResourceListBuilder.\"\"\"\n self.source = source\n self.config = self.source.publish_configs\n self.logger = logging.getLogger('resource_list_builder')\n self.executor = self.source.executor\n\n def bootstrap(self):\n \"\"\"Bootstrapping procedures implemented in subclasses.\"\"\"\n # todo implement a policy for resourcelist regeneration, this will not triggered in the source bootstrap\n #self.generate()\n pass\n\n @gen.coroutine\n def generate(self, config):\n \"\"\"Generate a resource_list (snapshot from the source).\"\"\"\n then = time.time()\n self.new_resource_list()\n now = time.time()\n self.logger.info(\"Generated resource_list: %f\" % (now - then))\n\n @gen.coroutine\n def new_resource_list(self):\n f = yield self.executor.submit(self.res_gen)\n print(str(f))\n\n def res_gen(self):\n rs_params = ElasticRsParameters(**self.source.config)\n gener = ElasticGenerator(rs_params)\n return gener.generate_resourcelist()\n\n\nclass Source(Observable):\n \"\"\"A source contains a list of resources and changes over time.\"\"\"\n\n RESOURCE_PATH = \"/resources\" # to append to base_uri\n STATIC_FILE_PATH = os.path.join(os.path.dirname(__file__), \"static\")\n\n def __init__(self, config, port):\n \"\"\"Initalize the source.\"\"\"\n super(Source, self).__init__()\n self.logger = logging.getLogger('source')\n self.config = config\n self.logger.info(\"Source config: %s \" % self.config)\n self.port = port\n self.max_res_id = 1\n self._repository = {} # {basename, {timestamp, length}}\n self.resource_list_builder = None # builder implementation\n self.changememory = None # change memory implementation\n self.no_events = 0\n self._executor = ThreadPoolExecutor(max_workers=4)\n self.publish_configs = self.config['publisher_configs']\n\n self.add_resource_list_builder(DynamicResourceListBuilder(self))\n\n @property\n def executor(self):\n return self._executor\n\n # Source capabilities\n\n def add_resource_list_builder(self, resource_list_builder):\n \"\"\"Add a resource_list builder implementation.\"\"\"\n self.resource_list_builder = resource_list_builder\n\n @property\n def has_resource_list_builder(self):\n \"\"\"Return True if the Source has an resource_list builder.\"\"\"\n return 
bool(self.resource_list_builder is not None)\n\n def add_changememory(self, changememory):\n \"\"\"Add a changememory implementation.\"\"\"\n self.changememory = changememory\n\n @property\n def has_changememory(self):\n \"\"\"Return True if a source maintains a change memory.\"\"\"\n return bool(self.changememory is not None)\n\n # Bootstrap Source\n\n def bootstrap(self):\n \"\"\"Bootstrap the source with a set of resources.\"\"\"\n self.logger.info(\"Bootstrapping source...\")\n if self.has_changememory:\n self.changememory.bootstrap()\n if self.has_resource_list_builder:\n # todo do it for all of them\n self.resource_list_builder.bootstrap()\n self._log_stats()\n\n # Source data accessors\n\n @property\n def describedby_uri(self):\n \"\"\"Description of Source\"\"\"\n return '/'\n\n @property\n def source_description_uri(self):\n \"\"\"URI of Source Description document.\n\n Will use standard pattern for well-known URI unless\n an explicit configuration is given.\n \"\"\"\n if 'source_description_uri' in self.config:\n return self.config['source_description_uri']\n return '.well-known/resourcesync'\n\n @property\n def resource_count(self):\n \"\"\"The number of resources in the source's repository.\"\"\"\n return len(self._repository)\n\n @property\n def random_resource(self):\n \"\"\"Return a single random resource.\"\"\"\n rand_res = self.random_resources()\n if len(rand_res) == 1:\n return rand_res[0]\n else:\n return None\n\n def resource(self, basename):\n \"\"\"Create and return a resource object.\"\"\"\n return None\n\n def random_resources(self, number=1):\n \"\"\"Return a random set of resources, at most all resources.\"\"\"\n if number > len(self._repository):\n number = len(self._repository)\n rand_basenames = random.sample(self._repository.keys(), number)\n return [self.resource(basename) for basename in rand_basenames]\n\n def _log_stats(self):\n \"\"\"Output current source statistics via the logger.\"\"\"\n stats = {\n 'no_resources': self.resource_count,\n 'no_events': self.no_events\n }\n self.logger.info(\"Source stats: %s\" % stats)\n\n def __str__(self):\n \"\"\"Print out the source's resources.\"\"\"\n return pprint.pformat(self._repository)\n\n\n" }, { "alpha_fraction": 0.5898058414459229, "alphanum_fraction": 0.7305825352668762, "avg_line_length": 20.736841201782227, "blob_id": "2c12084b3ef2dc675ace1831b9fe8e82fd141a89", "content_id": "7bd1a6aa6a9bb65505a3d3b809f88c2f8dd29945", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 412, "license_type": "no_license", "max_line_length": 67, "num_lines": 19, "path": "/requirements.txt", "repo_name": "giorgiobasile/resync-py-server", "src_encoding": "UTF-8", "text": "appdirs==1.4.0\nargh==0.26.2\ndecorator==4.0.11\ndefusedxml==0.4.1\nelasticsearch==1.9.0\nlogutils==0.3.3\npackaging==16.8\npathtools==0.1.2\npyparsing==2.1.10\npython-dateutil==2.6.0\nPyYAML==3.12\nrequests==2.13.0\nsix==1.10.0\ntornado==4.4.2\nurllib3==1.20\nvalidators==0.11.2\nwatchdog==0.8.3\n-e git+https://github.com/EHRI/rspub-core@master#egg=rspub-core\n-e git+https://github.com/EHRI/resync.git@ehribranch#egg=resyncehri" }, { "alpha_fraction": 0.5731366872787476, "alphanum_fraction": 0.5747309923171997, "avg_line_length": 32.413333892822266, "blob_id": "7a70ace8560c4e44ef491c74d33c48bb2d0d686b", "content_id": "6fa5fb2963d13675f2dc86e5d364b26280e1c60b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2509, "license_type": "no_license", 
"max_line_length": 104, "num_lines": 75, "path": "/resyncserver/elastic/elastic_rs_paras.py", "repo_name": "giorgiobasile/resync-py-server", "src_encoding": "UTF-8", "text": "import urllib.parse\n\nimport validators\nfrom rspub.core.rs_paras import RsParameters\n\n\nclass ElasticRsParameters(RsParameters):\n\n def __init__(self, **kwargs):\n super(ElasticRsParameters, self).__init__(**kwargs)\n self.publisher_name = kwargs['publisher_name']\n self.res_type = kwargs['res_type']\n self.elastic_host = kwargs['elastic_host']\n self.elastic_port = kwargs['elastic_port']\n self.elastic_index = kwargs['elastic_index']\n self.elastic_resource_type = kwargs['elastic_resource_type']\n\n # def abs_metadata_dir(self) -> str:\n # \"\"\"\n # ``derived`` :samp:`The absolute path to metadata directory`\n # :return: absolute path to metadata directory\n # \"\"\"\n # return self.metadata_dir\n #\n # @property\n # def metadata_dir(self):\n # return self._metadata_dir\n #\n # @metadata_dir.setter\n # def metadata_dir(self, path):\n # if not os.path.isabs(path):\n # path = os.path.join(self.resource_dir, path)\n #\n # self._metadata_dir = path\n\n @property\n def url_prefix(self):\n return self._url_prefix\n\n @url_prefix.setter\n def url_prefix(self, value):\n if value.endswith(\"/\"):\n value = value[:-1]\n parts = urllib.parse.urlparse(value)\n if parts[0] not in [\"http\", \"https\"]: # scheme\n raise ValueError(\"URL schemes allowed are 'http' or 'https'. Given: '%s'\" % value)\n is_valid_domain = validators.domain(parts.hostname) #hostname\n\n if parts.port is None:\n is_valid_port = True\n\n else:\n is_valid_port = is_int(parts.port)\n\n if not is_valid_domain:\n raise ValueError(\"URL has invalid domain name: '%s'. Given: '%s'\" % (parts.hostname, value))\n if not is_valid_port:\n raise ValueError(\"URL has invalid port: '%s'. Given: '%s'\" % (parts.port, value))\n if parts[4] != \"\": # query\n raise ValueError(\"URL should not have a query string. Given: '%s'\" % value)\n if parts[5] != \"\": # fragment\n raise ValueError(\"URL should not have a fragment. Given: '%s'\" % value)\n is_valid_url = validators.url(value)\n if not is_valid_url:\n raise ValueError(\"URL is invalid. 
Given: '%s'\" % value)\n if not value.endswith(\"/\"):\n value += \"/\"\n self._url_prefix = value\n\ndef is_int(s):\n try:\n int(s)\n return True\n except ValueError:\n return False\n\n\n\n" }, { "alpha_fraction": 0.5987361669540405, "alphanum_fraction": 0.6066350936889648, "avg_line_length": 26.926469802856445, "blob_id": "d79fc31d8fd967e1e8ab2ca0d93e92ab600cd70c", "content_id": "9d2c8f540ffeb50ac31264a0048397d6304c5367", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1899, "license_type": "no_license", "max_line_length": 81, "num_lines": 68, "path": "/resync-server.py", "repo_name": "giorgiobasile/resync-py-server", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\n\nResourceSync tool for exposing a changing Web data source.\n\nCreated by Giorgio Basile on 09-01-2017\n\"\"\"\nimport optparse\nimport os\n\nimport yaml\nimport logging\nimport logging.config\n\nfrom resyncserver._version import __version__\nfrom resyncserver.http_interface import HTTPInterface\nfrom resyncserver.source import Source\n\nDEFAULT_LOG_FILE = 'config/logging.yaml'\n\n\ndef main():\n\n # Define server options\n parser = optparse.OptionParser(description=\"ResourceSync Server\",\n usage='usage: %prog [options] (-h for help)',\n version='%prog '+__version__)\n parser.add_option('--config-file', '-c',\n help=\"the source configuration file\")\n parser.add_option('--log-config', '-l',\n default=DEFAULT_LOG_FILE,\n help=\"the logging configuration file\")\n parser.add_option('--port', '-p', type=int,\n default=8888,\n help=\"the HTTP interface port that the server will run on\")\n\n # Parse command line arguments\n (args, clargs) = parser.parse_args()\n\n if len(clargs) > 0:\n parser.print_help()\n return\n if args.config_file is None:\n parser.print_help()\n return\n\n logconfig = yaml.load(open(args.log_config, 'r'))\n logging.config.dictConfig(logconfig)\n\n config = yaml.load(open(args.config_file, 'r'))['source']\n\n source = Source(config, args.port)\n source.bootstrap()\n r = os.path.abspath(config['publisher_configs'][0])\n\n http_interface = HTTPInterface(source)\n try:\n pass\n print(\"ResourceSync server started on port \" + str(args.port))\n http_interface.run()\n except KeyboardInterrupt:\n print(\"Exiting gracefully...\")\n finally:\n http_interface.stop()\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.49708235263824463, "alphanum_fraction": 0.4998919367790222, "avg_line_length": 34.25190734863281, "blob_id": "21427cb175944e65dc68af027d690fc244a50ea0", "content_id": "2b694dc4aed8abfcec02b2efab55bba38d7fb7a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4627, "license_type": "no_license", "max_line_length": 191, "num_lines": 131, "path": "/resyncserver/elastic/elastic_populator.py", "repo_name": "giorgiobasile/resync-py-server", "src_encoding": "UTF-8", "text": "import optparse\nimport os\nfrom concurrent.futures import ThreadPoolExecutor\nfrom os.path import basename\n\nimport multiprocessing\nimport yaml\nfrom elasticsearch import Elasticsearch\nfrom rspub.util import defaults\n\nlimit = -1\n\ndef create_index(host, port, index, resource_type):\n es = Elasticsearch([{\"host\": host, \"port\": port}])\n mapping = {\n \"mappings\": {\n resource_type: {\n \"properties\": {\n \"filename\": {\n \"type\": \"string\",\n \"index\": \"not_analyzed\"\n },\n \"size\": {\n \"type\": \"integer\",\n \"index\": \"not_analyzed\"\n },\n \"md5\": 
{\n \"type\": \"string\",\n \"index\": \"not_analyzed\"\n },\n \"mime\": {\n \"type\": \"string\",\n \"index\": \"not_analyzed\"\n },\n \"time\": {\n \"type\": \"date\",\n \"index\": \"not_analyzed\"\n },\n \"publisher\": {\n \"type\": \"string\",\n \"index\": \"not_analyzed\"\n },\n \"res_type\": {\n \"type\": \"string\",\n \"index\": \"not_analyzed\"\n }\n }\n }\n }\n }\n es.indices.create(index=index, body=mapping, ignore=400)\n\n\ndef put_into_elasticsearch(elastic_host, elastic_port, elastic_index, elastic_resource_type, pub_name, res_type, file):\n stat = os.stat(file)\n doc = {\n \"filename\": file,\n \"size\": stat.st_size,\n \"md5\": defaults.md5_for_file(file),\n \"mime\": defaults.mime_type(file),\n \"time\": defaults.w3c_datetime(stat.st_ctime),\n \"publisher\": pub_name,\n \"res_type\": res_type\n }\n\n es = Elasticsearch([{\"host\": elastic_host, \"port\": elastic_port}])\n es.index(index=elastic_index, doc_type=elastic_resource_type, body=doc)\n\n\ndef traverse_folder(elastic_host, elastic_port, elastic_index, elastic_resource_type, pub_name, res_type, pub_folder):\n global limit\n count = 0\n cur_folder = pub_folder\n files_names = os.listdir(cur_folder)\n for f in files_names:\n if limit < 0 or (limit > 0 and count < limit):\n f_path = os.path.join(cur_folder, f)\n if os.path.isdir(f_path):\n traverse_folder(elastic_host, elastic_port, elastic_index, elastic_resource_type, pub_name, res_type, f_path)\n else:\n if not basename(f_path).startswith('.'):\n # todo: substitute with es bulk API\n put_into_elasticsearch(elastic_host, elastic_port, elastic_index, elastic_resource_type, pub_name, res_type, os.path.join(pub_folder, f_path))\n print(f_path)\n count += 1\n else:\n break\n\n\n\n\ndef main():\n parser = optparse.OptionParser()\n parser.add_option('--config-file', '-c',\n help=\"populator configuration file\")\n\n # Parse command line arguments\n (args, clargs) = parser.parse_args()\n\n if len(clargs) > 0:\n parser.print_help()\n return\n if args.config_file is None:\n parser.print_help()\n return\n\n config = yaml.load(open(args.config_file, 'r'))['populator']\n publishers = config['publishers']\n elastic_host = config['elastic_host']\n elastic_port = config['elastic_port']\n elastic_index = config['elastic_index']\n elastic_resource_type = config['elastic_resource_type']\n global limit\n if 'limit' in config:\n limit = config['limit']\n\n executor = ThreadPoolExecutor(max_workers=multiprocessing.cpu_count())\n\n create_index(elastic_host, elastic_port, elastic_index, elastic_resource_type)\n for publisher in publishers:\n subfolders = publisher['subfolders']\n for subfolder in subfolders:\n if 'type' in publisher:\n folder_type = publisher['type']\n else:\n folder_type = subfolder\n executor.submit(traverse_folder, elastic_host, elastic_port, elastic_index, elastic_resource_type, publisher[\"name\"], folder_type, os.path.join(publisher[\"resources\"], subfolder))\n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.7903125286102295, "alphanum_fraction": 0.7903125286102295, "avg_line_length": 48.96875, "blob_id": "1e82ae1ccb909a30f4faafc079d183ec22ec1602", "content_id": "8996ecc8df2e677e69292b381e59bd78ebea811e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3200, "license_type": "no_license", "max_line_length": 135, "num_lines": 64, "path": "/README.md", "repo_name": "giorgiobasile/resync-py-server", "src_encoding": "UTF-8", "text": "OpenMinTeD ResourceSync server 
demo\n===================================\n\nA Python implementation of a ResourceSync generator and server provided by the OpenMinTeD project, using \n[Elasticsearch](https://www.elastic.co/products/elasticsearch) as storage system. Given a file to be tracked, a document\n containing its metadata is stored into Elasticsearch, allowing fast retrieval and tracking. \n\nThe [resyncserver.elastic](resyncserver/elastic) package provides an extension of the [rspub-core](https://github.com/EHRI/rspub-core) \nlibrary for ResourceSync sitemaps manipulation. For demo purposes, it is possible to run three different python scripts:\n\n* [populator](resyncserver/elastic/elastic_populator.py): stores metadata of files to be tracked into Elasticsearch\n* [generator](resyncserver/elastic/elastic_generator.py): creates a Resource List from the metadata stored \ninto Elasticsearch\n* [server](resync-server.py): exposes ResourceSync sitemaps and resources on the web\n\nThese modules are executable providing well-formed configuration files. Examples of configuration files can be\nfound in the [config/test](config/test) folder.\n\nThe server implementation is based on the [resync-simulator](https://github.com/resync/resync-simulator) project.\n\n\nPopulator\n---------\nThe [elastic_populator.py](resyncserver/elastic/elastic_populator.py) module allows to record files' metadata into \nElasticsearch, given a configuration file specifying an Elasticsearch node address and the folders to be tracked. \nTo run the populator, simply write:\n\n```\ncd resync-omtd-demo\npython resyncserver/elastic/elastic_populator --config=/path/to/config\n```\n\nGenerator\n---------\nThe [elastic_generator.py](resyncserver/elastic/elastic_generator.py) module allows to generate new ResourceSync \ndocuments. It takes advantage of the rspub-core library [rspub-core](https://github.com/EHRI/rspub-core) \nlibrary, extending some of its classes in order to support document generation from Elasticsearch instead of looking \ninto the file system. The configuration file must contain all the\nparameters required by the [rspub.core.rs_paras.RsParameters](https://github.com/EHRI/rspub-core/blob/master/rspub/core/rs_paras.py)\nclass, apart from the ones regarding the selector, which will be replaced by Elasticsearch configuration parameters. \nIndeed, the role of the selector is played by Elasticsearch, which will already contain only the files that have to be \ntracked. \n\nThe generator will create a new capability list for each configuration file. A source description will be created or\nupdated if already existing.\n\nTo run the generator, simply write:\n\n```\ncd resync-omtd-demo\npython resyncserver/elastic/elastic_generator --config=/path/to/config\n```\n\nServer\n------\nThe [resync-server.py](resync-server.py) module runs a simple Tornado server to expose ResourceSync sitemaps previously \ngenerated by the generator module and possibly the actual files linked into it. For simplicity, each file into the root \nfolder provided through the configuration file is exposed through the same Tornado controller.\nTo run the server, simply write:\n\n```\ncd resync-omtd-demo\npython resync-server.py --config=/path/to/config\n```\n\n\n" } ]
11
AIFDR/riab
https://github.com/AIFDR/riab
aab9b433640464af43817e9658b14620e6b516a3
0bc2cbbef82be588e3568bb2ac8739727fb906c1
d6d9bdf00c73cfc77131a965ecd68f20e9860540
refs/heads/master
2016-09-10T02:03:20.271377
2012-03-05T09:16:24
2012-03-05T09:16:24
1,462,033
7
4
null
null
null
null
null
[ { "alpha_fraction": 0.6086223721504211, "alphanum_fraction": 0.6317437291145325, "avg_line_length": 32.75609588623047, "blob_id": "fd4e0cfa285e3aadefd49e13c6ff615e692b8253", "content_id": "63ffad08982325bbcca1d0138443499a43aa958e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4152, "license_type": "no_license", "max_line_length": 196, "num_lines": 123, "path": "/docs/usage/plugins/tsunami_plugins.rst", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "Tsunami Plugins\n===============\n\nExample Tsunami Plugin\n----------------------\n\nThis example calculates the tsunami impact on population data\n\n\nPlugin code::\n\n\timport numpy\n\n\tfrom impact.plugins.core import FunctionProvider\n\tfrom impact.storage.raster import Raster\n\tfrom django.utils.translation import ugettext as _\n\n\n\tclass TsunamiPopulationImpactFunction(FunctionProvider):\n\t \"\"\"Risk plugin for tsunami impact on population data\n\n\t :param requires category==\"hazard\" and \\\n\t\t subcategory.startswith(\"tsunami\") and \\\n\t\t layer_type==\"raster\"\n\t :param requires category==\"exposure\" and \\\n\t\t subcategory.startswith(\"population\") and \\\n\t\t layer_type==\"raster\"\n\t \"\"\"\n\n\t def run(self, layers):\n\t\t\"\"\"Risk plugin for tsunami population\n\t\t\"\"\"\n\n\t\tthresholds = [0.2, 0.3, 0.5, 0.8, 1.0]\n\t\t#threshold = 1 # Depth above which people are regarded affected [m]\n\n\t\t# Identify hazard and exposure layers\n\t\tinundation = layers[0] # Tsunami inundation [m]\n\t\tpopulation = layers[1] # Population density\n\n\t\t# Get actual resolution\n\t\tresolution = population.get_resolution(isotropic=True)\n\n\t\t# Extract data as numeric arrays\n\t\tD = inundation.get_data(nan=0.0) # Depth\n\t\tP = population.get_data(nan=0.0) # Population density\n\n\t\t# Calculate impact as population exposed to depths > 1m\n\t\tI_map = numpy.where(D > thresholds[-1], P, 0)\n\n\t\t# Generate text with result for this study\n\t\tnumber_of_people_affected = sum(I_map.flat)\n\n\t\t# Do breakdown\n\n\t\t# Create report\n\t\tcaption = ('<table border=\"0\" width=\"320px\">'\n\t\t ' <tr><th><b>%s</b></th><th><b>%s</b></th></th>'\n\t\t ' <tr></tr>' % ('Ambang batas', 'Jumlah orang terdampak'))\n\n\t\tcounts = []\n\t\tfor i, threshold in enumerate(thresholds):\n\t\t I = numpy.where(D > threshold, P, 0)\n\t\t counts.append(sum(I.flat))\n\n\t\t caption += ' <tr><td>%s m</td><td>%i</td></tr>' % (threshold,\n\t\t counts[i])\n\n\t\tcaption += '</table>'\n\n\t\t# Create raster object and return\n\t\tR = Raster(I_map,\n\t\t projection=inundation.get_projection(),\n\t\t geotransform=inundation.get_geotransform(),\n\t\t name='People affected by more than 1m of inundation',\n\t\t keywords={'caption': caption})\n\t\treturn R\n\n\t def generate_style(self, data):\n\t\t\"\"\"Generates and SLD file based on the data values\n\t\t\"\"\"\n\n\t\ts = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<sld:StyledLayerDescriptor xmlns=\"http://www.opengis.net/sld\" xmlns:sld=\"http://www.opengis.net/sld\" xmlns:ogc=\"http://www.opengis.net/ogc\" xmlns:gml=\"http://www.opengis.net/gml\" version=\"1.0.0\">\n\t <sld:NamedLayer>\n\t <sld:Name>People affected by more than 1m of inundation</sld:Name>\n\t <sld:UserStyle>\n\t <sld:Name>People affected by more than 1m of inundation</sld:Name>\n\t <sld:Title>People Affected By More Than 1m Of Inundation</sld:Title>\n\t <sld:Abstract>People Affected By More Than 1m Of Inundation</sld:Abstract>\n\t 
<sld:FeatureTypeStyle>\n\t\t<sld:Name>People affected by more than 1m of inundation</sld:Name>\n\t\t<sld:Rule>\n\t\t <sld:RasterSymbolizer>\n\t\t <sld:Geometry>\n\t\t <ogc:PropertyName>geom</ogc:PropertyName>\n\t\t </sld:Geometry>\n\t\t <sld:ChannelSelection>\n\t\t <sld:GrayChannel>\n\t\t <sld:SourceChannelName>1</sld:SourceChannelName>\n\t\t </sld:GrayChannel>\n\t\t </sld:ChannelSelection>\n\t\t <sld:ColorMap>\n\t\t <sld:ColorMapEntry color=\"#ffffff\" opacity=\"0\" quantity=\"-9999.0\"/>\n\t\t <sld:ColorMapEntry color=\"#38A800\" opacity=\"0\" quantity=\"0.01\"/>\n\t\t <sld:ColorMapEntry color=\"#38A800\" quantity=\"0.02\"/>\n\t\t <sld:ColorMapEntry color=\"#79C900\" quantity=\"0.05\"/>\n\t\t <sld:ColorMapEntry color=\"#CEED00\" quantity=\"0.1\"/>\n\t\t <sld:ColorMapEntry color=\"#FFCC00\" quantity=\"0.2\"/>\n\t\t <sld:ColorMapEntry color=\"#FF6600\" quantity=\"0.3\"/>\n\t\t <sld:ColorMapEntry color=\"#FF0000\" quantity=\"0.5\"/>\n\t\t <sld:ColorMapEntry color=\"#7A0000\" quantity=\"0.9\"/>\n\t\t </sld:ColorMap>\n\t\t </sld:RasterSymbolizer>\n\t\t</sld:Rule>\n\t </sld:FeatureTypeStyle>\n\t </sld:UserStyle>\n\t </sld:NamedLayer>\n\t</sld:StyledLayerDescriptor>\n\n\t\t\"\"\"\n\n\t\treturn s\n" }, { "alpha_fraction": 0.6954787373542786, "alphanum_fraction": 0.6984707713127136, "avg_line_length": 28.490196228027344, "blob_id": "1f534f3721746210a87039c5c44ed8c1ef9067ca", "content_id": "5cffc0c65e8729b0e7f23b99558dc5eba6a2f6e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3008, "license_type": "no_license", "max_line_length": 205, "num_lines": 102, "path": "/docs/development/contributing.rst", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "Contributing\n============\n\nCoding conventions for Risiko\n-----------------------------\n\n* Coding must follow a style guide. In case of Python it is http://www.python.org/dev/peps/pep-0008 and using the command line tool pep8 to enforce this\n* Python documentation guide: http://www.python.org/dev/peps/pep-0257\n* Adherence to regression/unit testing wherever possible\n* Use of github for revision control, issue tracking and management\n* Simple deployment procedure i.e. automatic system configuration and installation of dependencies (at least for Ubuntu)\n* Develop in the spirit of XP/Agile, i.e. frequent releases, continuous integration and iterative development. 
The master branch should always be assumed to represent a working demo with all tests passing.\n\n\nBranching guide\n---------------\n\nRisiko follows the branching model laid out in this paper:\nhttp://nvie.com/posts/a-successful-git-branching-model\n\nWith the develop branch being the backbone default branch\nwith the bleeding edge and master always a stable release.\n\n\n\nProcess for developers adding a new feature\n-------------------------------------------\n\nCreate a feature branch\n * git checkout -b <featurebranch> develop\n\nWrite new code and tests\n ...\n\nPublish (if unfinished)\n * git push origin <featurebranch>\n\nTo keep branch up to date\n * git checkout <featurebranch>\n * git merge origin develop\n\nWhen all tests pass, either merge into develop\n * git checkout develop\n * git merge --no-ff <featurebranch>\n (possibly resolve conflict and verify test suite runs)\n * git push origin develop\n\nOr issue a pull request through github\n ..\n\nTo delete when branch is no longer needed\n * git push origin :<featurebranch>\n\n\n\nProcess for making a new release\n--------------------------------\n\nCreate a release branch from the current development branch\n * git checkout -b <releasebranch> develop\n\nStart working on release specific development (such as bumping version number)\n ...\n\nWhen ready, merge release into master effectively making it official\n * git checkout master\n * git merge --no-ff <releasebranch>\n * git tag -a <version number>\n * git push origin master\n\nUpdate development branch as well\n * git checkout develop\n * git merge --no-ff <releasebranch>\n (resolve conflicts)\n * git push origin develop\n\nDelete release branch\n * git branch -d <releasebranch>\n or\n * git push origin :<releasebranch>\n\n\nProcess for making a hotfix on master\n-------------------------------------\n\nCreate a hotfix branch from master\n * git checkout -b <hotfixbranch> master\n\nStart working on fix (including bumping minor version number)\n ...\n\nWhen fixed, merge fix back into both master and develop\n * git checkout master\n * git merge --no-ff <hotfixbranch>\n * git tag -a <version number>\n * git push origin master\n * git checkout develop\n * git merge --no-ff <hotfixbranch>\n * git push origin develop\n\nDelete hotfix branch\n * git branch -d <hotfixbranch>\n" }, { "alpha_fraction": 0.6954545378684998, "alphanum_fraction": 0.6954545378684998, "avg_line_length": 26.25, "blob_id": "31e61800110cd8c7b3a64262566b1fb0c83c0c66", "content_id": "500a9577189b78d61543f1f45d200dca06a4a97f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 220, "license_type": "no_license", "max_line_length": 75, "num_lines": 8, "path": "/docs/development/git.rst", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "GIT survival links\n==================\n\nCheat sheet: http://cheat.errtheblog.com/s/git\n\nGit commands: http://www.kernel.org/pub/software/scm/git/docs/everyday.html\n\nGit guide: http://spheredev.org/wiki/Git_for_the_lazy\n\n\n" }, { "alpha_fraction": 0.5352941155433655, "alphanum_fraction": 0.5882353186607361, "avg_line_length": 23.14285659790039, "blob_id": "8880b042d50b517e685976bfc4d4c159b48cdc85", "content_id": "f3fd030dc9b3ee91240ca49615340c4fc7625351", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 170, "license_type": "no_license", "max_line_length": 41, "num_lines": 7, "path": "/docs/development/project-links.rst", 
"repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "Links for the Risiko development\n================================\n\nAmazon data store: http://aws.amazon.com/\n - username: [email protected]\n\nDemo server: ssh [email protected]\n\n" }, { "alpha_fraction": 0.6214285492897034, "alphanum_fraction": 0.6285714507102966, "avg_line_length": 27, "blob_id": "ba4741c20e89da80665683e9127e256c4c8b04c6", "content_id": "5b2c61ccb21813f37db5d7ed8dd4e26bb65f3c18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 140, "license_type": "no_license", "max_line_length": 60, "num_lines": 5, "path": "/impact/plugins/earthquake/__init__.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "import os\nfrom glob import glob\n\npattern = os.path.dirname(__file__) + '/*.py'\n__all__ = [os.path.basename(f)[: -3] for f in glob(pattern)]\n" }, { "alpha_fraction": 0.7252964377403259, "alphanum_fraction": 0.7371541261672974, "avg_line_length": 24.299999237060547, "blob_id": "79739b7c374b02547da69d86c600779cf34a46f9", "content_id": "91f01d4be62ecde96915f10f19f1b5a3abb9e74d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 506, "license_type": "no_license", "max_line_length": 72, "num_lines": 20, "path": "/impact/plugins/__init__.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "\"\"\"\nBasic plugin framework based on::\nhttp://martyalchin.com/2008/jan/10/simple-plugin-framework/\n\"\"\"\n\nimport os\nimport os.path\nimport glob\n\ndirname = os.path.dirname(__file__)\n\n# Import all the subdirectories\nfor f in os.listdir(dirname):\n if os.path.isdir(os.path.join(dirname, f)):\n exec('from impact.plugins.%s import *' % f, locals(), globals())\n\n\nfrom impact.plugins.core import FunctionProvider\nfrom impact.plugins.core import get_plugins\nfrom impact.plugins.core import compatible_layers\n" }, { "alpha_fraction": 0.5663082599639893, "alphanum_fraction": 0.5698924660682678, "avg_line_length": 14.38888931274414, "blob_id": "9fcf46b0023114e34f1c72afee39c62db16af0ed", "content_id": "7074de0c90b0a4baa9c50902c76fd969c5fa95be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 279, "license_type": "no_license", "max_line_length": 38, "num_lines": 18, "path": "/docs/deployment/production_maintenance.rst", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "Deployment\n==========\n\nMaintenance of production installation\n--------------------------------------\n\n\n * To update code::\n\n git pull origin <branchname>\n sudo service apache2 restart\n\n * To clear demo server\n TBA\n\n * To upload new data::\n\n risiko-upload <dirname>\n\n\n" }, { "alpha_fraction": 0.6400153040885925, "alphanum_fraction": 0.642310619354248, "avg_line_length": 28.044445037841797, "blob_id": "84692b89c69535a0a8038702e23e3b212bf51f26", "content_id": "b85b77292b9e995a45b16d481eece2aaa381ef7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2614, "license_type": "no_license", "max_line_length": 79, "num_lines": 90, "path": "/impact/engine/interpolation.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "\"\"\"Wrapper around interpolation.\n\nIt provides interpolation functionality to Raster and Vector instances\nusing the underlying interpolation algorithm in interpolate2d.py\n\"\"\"\n\nimport numpy\nfrom impact.engine.interpolation2d import 
interpolate_raster\nfrom impact.storage.vector import Vector\nfrom impact.storage.vector import convert_polygons_to_centroids\n\n\ndef interpolate_raster_vector_points(R, V, name=None):\n \"\"\"Interpolate from raster layer to point data\n\n Input\n R: Raster data set (grid)\n V: Vector data set (points)\n name: Name for new attribute.\n If None (default) the name of R is used\n\n Output\n I: Vector data set; points located as V with values interpolated from R\n\n \"\"\"\n\n msg = ('There are no data points to interpolate to. Perhaps zoom out '\n 'and try again')\n assert len(V) > 0, msg\n\n # Input checks\n assert R.is_raster\n assert V.is_vector\n assert V.is_point_data\n\n # Get raster data and corresponding x and y axes\n A = R.get_data(nan=True)\n longitudes, latitudes = R.get_geometry()\n assert len(longitudes) == A.shape[1]\n assert len(latitudes) == A.shape[0]\n\n # Get vector point geometry as Nx2 array\n coordinates = numpy.array(V.get_geometry(),\n dtype='d',\n copy=False)\n\n # Interpolate and create new attribute\n N = len(V)\n attributes = []\n if name is None:\n name = R.get_name()\n\n values = interpolate_raster(longitudes, latitudes, A,\n coordinates, mode='linear')\n\n # Create list of dictionaries for this attribute and return\n for i in range(N):\n attributes.append({name: values[i]})\n\n return Vector(data=attributes, projection=V.get_projection(),\n geometry=coordinates)\n\n\ndef interpolate_raster_vector(R, V, name=None):\n \"\"\"Interpolate from raster layer to vector data\n\n Input\n R: Raster data set (grid)\n V: Vector data set (points or polygons)\n name: Name for new attribute.\n If None (default) the name of R is used\n\n Output\n I: Vector data set; points located as V with values interpolated from R\n\n Note: If target geometry is polygon, data will be interpolated to\n its centroids and the output is a point data set.\n \"\"\"\n\n # Input checks\n assert R.is_raster\n assert V.is_vector\n\n if V.is_polygon_data:\n # Use centroids, in case of polygons\n P = convert_polygons_to_centroids(V)\n else:\n P = V\n\n return interpolate_raster_vector_points(R, P, name=name)\n" }, { "alpha_fraction": 0.5813596844673157, "alphanum_fraction": 0.5927755832672119, "avg_line_length": 35.006134033203125, "blob_id": "dc341bd5ec6098c1b33d31b1967e87c1277d6542", "content_id": "36bc0dbd2279085ad6c9d81698d3aa9dccd044d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5869, "license_type": "no_license", "max_line_length": 79, "num_lines": 163, "path": "/impact/tests/test_api.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "import unittest\nimport os\nfrom django.test.client import Client\nfrom django.utils import simplejson as json\nfrom django.conf import settings\nfrom impact.storage.io import save_to_geonode\n\nfrom geonode.maps.utils import check_geonode_is_up\nfrom geonode.maps.models import Layer\nfrom geonode.maps.utils import get_valid_user\nfrom impact.storage.io import check_layer\nfrom impact.tests.utilities import TESTDATA, INTERNAL_SERVER_URL\n\nfrom impact.tests.plugins import unspecific_building_impact_model\n\n\nclass Test_HTTP(unittest.TestCase):\n \"\"\"Test suite for API\n \"\"\"\n\n def setUp(self):\n \"\"\"Check geonode and create valid superuser\n \"\"\"\n check_geonode_is_up()\n self.user = get_valid_user()\n\n def tearDown(self):\n pass\n\n def test_functions(self):\n \"\"\"Functions can be retrieved from the HTTP Rest API\n \"\"\"\n\n c = Client()\n rv = 
c.get('/impact/api/functions/')\n self.assertEqual(rv.status_code, 200)\n self.assertEqual(rv['Content-Type'], 'application/json')\n data = json.loads(rv.content)\n\n msg = ('The api should return a dictionary with at least one item. '\n 'The key of that item should be \"functions\"')\n assert 'functions' in data, msg\n functions = data['functions']\n\n msg = ('No functions were found in the functions list, '\n 'not even the built-in ones')\n assert len(functions) > 0, msg\n\n def test_layers(self):\n \"\"\"Layers can be retrieved from the HTTP Rest API\n \"\"\"\n\n c = Client()\n rv = c.get('/impact/api/layers/')\n self.assertEqual(rv.status_code, 200)\n self.assertEqual(rv['Content-Type'], 'application/json')\n data = json.loads(rv.content)\n\n def test_calculate_fatality(self):\n \"\"\"Earthquake fatalities calculation via the HTTP Rest API is correct\n \"\"\"\n\n # Upload required data first\n for filename in ['Earthquake_Ground_Shaking.asc',\n 'Population_2010_clip.tif']:\n thefile = os.path.join(TESTDATA, filename)\n uploaded = save_to_geonode(thefile, user=self.user, overwrite=True)\n check_layer(uploaded, full=True)\n\n # Run calculation through API\n c = Client()\n rv = c.post('/impact/api/calculate/',\n dict(hazard_server=INTERNAL_SERVER_URL,\n hazard='geonode:earthquake_ground_shaking',\n exposure='geonode:population_2010_clip',\n exposure_server=INTERNAL_SERVER_URL,\n bbox='99.36,-2.199,102.237,0.00',\n impact_function='Earthquake Fatality Function',\n keywords='test,earthquake,fatality'))\n\n msg = 'Expected status code 200, got %i' % rv.status_code\n self.assertEqual(rv.status_code, 200), msg\n\n msg = ('Expected Content-Type \"application/json\", '\n 'got %s' % rv['Content-Type'])\n self.assertEqual(rv['Content-Type'], 'application/json'), msg\n\n data = json.loads(rv.content)\n\n if data['stacktrace'] is not None:\n msg = data['stacktrace']\n raise Exception(msg)\n\n assert 'hazard_layer' in data.keys()\n assert 'exposure_layer' in data.keys()\n assert 'run_duration' in data.keys()\n assert 'run_date' in data.keys()\n assert 'layer' in data.keys()\n assert 'bbox' in data.keys()\n assert 'impact_function' in data.keys()\n\n layer_uri = data['layer']\n\n #FIXME: This is not a good way to access the layer name\n typename = layer_uri.split('/')[4]\n name = typename.split(':')[1]\n\n # Check the autogenerated styles were correctly uploaded\n layer = Layer.objects.get(name=name)\n\n msg = ('A new style should have been created for layer [%s] '\n 'got [%s] style instead.' 
% (name, layer.default_style.name))\n assert layer.default_style.name == name, msg\n\n def test_calculate_school_damage(self):\n \"\"\"Earthquake school damage calculation works via the HTTP REST API\n \"\"\"\n\n # Upload required data first\n for filename in ['lembang_mmi_hazmap.asc',\n 'lembang_schools.shp']:\n thefile = os.path.join(TESTDATA, filename)\n uploaded = save_to_geonode(thefile, user=self.user, overwrite=True)\n check_layer(uploaded, full=True)\n\n # Run calculation through API\n c = Client()\n rv = c.post('/impact/api/calculate/', data=dict(\n hazard_server=INTERNAL_SERVER_URL,\n hazard='geonode:lembang_mmi_hazmap',\n exposure_server=INTERNAL_SERVER_URL,\n exposure='geonode:lembang_schools',\n bbox='105.592,-7.809,110.159,-5.647',\n impact_function='Earthquake Building Damage Function',\n keywords='test,schools,lembang',\n ))\n\n msg = 'Expected status code 200, got %i' % rv.status_code\n self.assertEqual(rv.status_code, 200), msg\n\n msg = ('Expected Content-Type \"application/json\", '\n 'got %s' % rv['Content-Type'])\n self.assertEqual(rv['Content-Type'], 'application/json'), msg\n\n data = json.loads(rv.content)\n\n if data['stacktrace'] is not None:\n msg = data['stacktrace']\n raise Exception(msg)\n\n assert 'hazard_layer' in data.keys()\n assert 'exposure_layer' in data.keys()\n assert 'run_duration' in data.keys()\n assert 'run_date' in data.keys()\n assert 'layer' in data.keys()\n\n # FIXME (Ole): Download result and check.\n\n\nif __name__ == '__main__':\n suite = unittest.makeSuite(Test_HTTP, 'test')\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite)\n" }, { "alpha_fraction": 0.5908809304237366, "alphanum_fraction": 0.6035348176956177, "avg_line_length": 34.9901008605957, "blob_id": "b10a6e9ac6f0a8d5d2a8a2f289db86372117077c", "content_id": "92785679e742563419ce464e009b0db5bcd1db4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 14541, "license_type": "no_license", "max_line_length": 311, "num_lines": 404, "path": "/docs/usage/plugins/development.rst", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "==================\nPlugin Development\n==================\n\n------------\nIntroduction\n------------\n\nRisiko contains a plugin system that allows complex impact functions to be implemented in Python (http://www.python.org) whilst (ideally) minimizing\nthe need to understand all the complexity of the handling the hazard and exposure layers. Features of the \nRisiko plugin system are:\n\n* Auto registration of new plugins after restart\n* Derivation of more complex plugins from simpler ones\n* Auto hiding for plugins that could not be run (depending on the requirements)\n* Allow for additional functionality to be added easily\n* Provide uptodate documentation on plugin functionality\n\nFor details about the internal workings of the plugin subsystem please consult the developers guide in section :ref:`sec-plugin-manager`. \nThere are also many examples in this section showing plugins used for earthquake, tusnami and flood which can act as templates for your own plugins. \n\n-------------------------------------------\nWriting a Simple Raster Plugin: Tutorial 01\n-------------------------------------------\n\nThis section provides a beginners tutorial on writing a simple earthquke impact plugin from scratch. 
You will need to be familiar with the basics of Python to be able to write and debug plugins - if you are new to Python the standard Python tutorial is a great place to start (http://docs.python.org/tutorial/).\n\nFor this plugin we want to calculate a simple impact by using the following function of \nthe severity of hazard (i.e. the amount of ground shaking - H) by the exposure \n(i.e. the number of people in that area - P). e.g.::\n\n Impact = 10 ** (a * H - b) * P\n \n where \n H: Raster layer of MMI ground shaking\n P: Raster layer of population data on the same grid as H\n a,b: Parameters that were tuned from real world data\n \n\nDefining the impact class\n+++++++++++++++++++++++++\n\nAs the first step we need to define the plugin class.::\n\n class SimpleImpactEarthquakeFunction(FunctionProvider)\n\nEvery plugin must be subclassed from FunctionProvider. This is the\nmethod of registration for the plugin and allows the Risiko Plugin \nManager to know what plugins are available.\n\nImpact Parameters\n+++++++++++++++++\n\nEach plugin needs to be used in the correct context. Using a flood impact function for earthquakes will likely yield misleading\nresults at best! As such pugins may have a variety of conditions that need to be met before they can be run. Such conditions\nmay include:\n\n* The type of hazard\n* The type of exposure\n* The form of the layer data (raster or vector)\n* The measure or unit type of a layer\n* Any other meta data defined in the layer\n\nIn the future plugins may also support filtering by:\n* The geographic location\n* The avaliable layer meta data\n\nRisiko will try to show users only those plugins that can be validly run.\n\nThese parameters required to run the plugin, and indeed all parameters specific to the plugin, \nare defined in the doc string of the class::\n\n class SimpleImpactEarthquakeFunction(FunctionProvider):\n \"\"\"Simple plugin for earthquake damage\n\n :author Allen\n :rating 1\n :param requires category=='hazard' and \\\n subcategory.startswith('earthquake') and \\\n layer_type=='raster'\n :param requires category=='exposure' and \\\n subcategory.startswith('population') and \\\n layer_type=='raster'\n \"\"\"\n\nThis tells the Risiko plugin manager that this plugin requires at a minimum inputs of\n\n* category of 'hazard', with a layer subcategory of 'earthquake' and it must be a layerType of 'Raster'\n* category of 'exposure', with a layer subcategory of 'earthquake' and it must be a layerType of 'Raster'\n\nThe `require` expression can be any artibary python expression that can be evaluated.\n\n.. note::\n\t1. Lines can be broken using the line continuation character '\\\\' at the end of a line\n\t2. If any one of the conditions is not met the plugin will not be visible from the impact selection box.\n\nThe calculation function\n++++++++++++++++++++++++\n\nEach plugin must then define a `run` method which is the plugin execution code::\n\n @staticmethod\n def run(input):\n\t\nThe parameters are passed in as a dictionary. 
It is up to the framework to populate the\ndictionary correctly in this case with keys containing relavent data for the exposure and hazard.::\n\n @staticmethod\n def run(layers,\n a=0.97429, b=11.037):\n \"\"\"Risk plugin for earthquake fatalities\n\n Input\n layers: List of layers expected to contain\n H: Raster layer of MMI ground shaking\n P: Raster layer of population data on the same grid as H\n \"\"\"\n\n # Identify input layers\n intensity = layers[0]\n population = layers[1]\n\n # Extract data\n H = intensity.get_data(nan=0)\n P = population.get_data(nan=0)\n\n # Calculate impact\n F = 10 ** (a * H - b) * P\n\n # Create new layer and return\n R = Raster(F,\n projection=population.get_projection(),\n geotransform=population.get_geotransform(),\n name='Estimated fatalities')\n return R\n\n\n\nAt the end of the function the calculated impact layer R is returned. This return layer \nin our example is a Raster layer the correct projection for this layer is ensured by passing\nin the input layer projections.\n\n\nInstalling the plugin\n+++++++++++++++++++++\n\nThe whole plugin file will now read::\n\n from impact.plugins.core import FunctionProvider\n from impact.storage.raster import Raster\n\n class SimpleImpactEarthquakeFunction(FunctionProvider):\n\t \"\"\"Simple plugin for earthquake damage\n\t\n\t :author Allen\n\t :rating 1\n\t :param requires category=='hazard' and \\\n\t subcategory.startswith('earthquake') and \\\n\t layer_type=='raster'\n\t :param requires category=='exposure' and \\\n\t subcategory.startswith('population') and \\\n\t layer_type=='raster'\n\t \"\"\"\n\n\t @staticmethod\n\t def run(layers,\n\t a=0.97429, b=11.037):\n\t \"\"\"Risk plugin for earthquake fatalities\n\t\n\t Input\n\t layers: List of layers expected to contain\n\t H: Raster layer of MMI ground shaking\n\t P: Raster layer of population data on the same grid as H\n\t \"\"\"\n\t\n\t # Identify input layers\n\t intensity = layers[0]\n\t population = layers[1]\n\t\n\t # Extract data\n\t H = intensity.get_data(nan=0)\n\t P = population.get_data(nan=0)\n\t\n\t # Calculate impact\n\t F = 10 ** (a * H - b) * P\n\t\n\t # Create new layer and return\n\t R = Raster(F,\n\t projection=population.get_projection(),\n\t geotransform=population.get_geotransform(),\n\t name='Estimated fatalities')\n\t return R\n\nIf this is saved as SimpleImpactEarthquakeFunction.py\n\nSince its an earthquake plugin save the code into the following directory::\n\n\t[root risiko dir]/riab/riab/impact/plugins/earthquake directory. \n\nThen restart Risiko using::\n\n\trisiko-stop\n\trisiko-start\n\n.. note:: Once the plugin has been registered you will not need to restart when you make changes to the plugin.\n\nTesting the plugin\n++++++++++++++++++\n\n\nIf you now go to your local Riskio install (by default 127.0.0.1:8000) you can select the following from the demo data:\n\n* Earthquake ground shaking\n* Glp10ag (Population for Indonesia)\n\n.. figure:: /images/plugin_tutorial01_1.png\n\t:scale: 100 %\n \n **Figure 1. The parameter selection box.**\n\n.. note:: If you don't see any demo data please follow the quick start instructions :ref:`sec-quick-start`.\n\nYou should see your impact function in the fuction selection box as `Simple Impact Earthquake Function`. \nThe Plugin name shown here is derived from the plugin class name SimpleImpactEarthquakeFunction.\n\n\n.. figure:: /images/plugin_tutorial01_2.png\n\t:scale: 100 %\n \n **Figure 2. The demo data for the hazard and population shown.**\n\n\nThen click on calculate. 
\n\nIf, after a few seconds, you see a new layer appear showing estimated fatalities then congratulations!\n\n.. figure:: /images/plugin_tutorial01_4.png\n\t:scale: 100 %\n \n **Figure 3. The fatality impact calculated using our simple impact function**\n\nIf you get an error please check you have carefully followed the instructions above (`Installing the plugin`_).\n\n.. figure:: /images/plugin_tutorial01_5.png\n\t:scale: 100 %\n \n **Figure 4. An example syntax error in the plugin.**\n\n.. note:: FIXME: Include some information about the error logging file \n\n\n\n-------------------------------------------\nWriting a Simple Vector Plugin: Tutorial 02\n-------------------------------------------\n\nFor the next tutorial we will look at a flood impact plugin. \nThis plugin differs from the above tutorial as it deals with vector data and also applies styling to the output.\n\n.. note:: FIXME: Include more description about the various components of the plugin. \n\nThe plugin code is:: \n\n\n\tfrom django.template.loader import render_to_string\n\tfrom impact.plugins.core import FunctionProvider\n\tfrom impact.storage.vector import Vector\n\tfrom django.utils.translation import ugettext as _\n\tfrom impact.plugins.utilities import PointZoomSize\n\tfrom impact.plugins.utilities import PointClassColor\n\tfrom impact.plugins.utilities import PointSymbol\n\timport scipy.stats\n\t\n\t\n\tclass FloodBuildingImpactFunction(FunctionProvider):\n\t \"\"\"Risk plugin for flood impact on building data\n\t\n\t :param requires category=='hazard' and \\\n\t subcategory.startswith('flood') and \\\n\t layer_type=='raster' and \\\n\t unit=='m'\n\t :param requires category=='exposure' and \\\n\t subcategory.startswith('building')\n\t \"\"\"\n\t\n\t target_field = 'AFFECTED'\n\t\n\t def run(self, layers):\n\t \"\"\"Risk plugin for tsunami population\n\t \"\"\"\n\t\n\t # Extract data\n\t # FIXME (Ole): This will be replaced by a helper function\n\t # to separate hazard from exposure using keywords\n\t H = layers[0] # Depth\n\t E = layers[1] # Building locations\n\t\n\t # Interpolate hazard level to building locations\n\t H = H.interpolate(E)\n\t\n\t # Extract relevant numerical data\n\t coordinates = E.get_geometry()\n\t depth = H.get_data()\n\t N = len(depth)\n\t\n\t # List attributes to carry forward to result layer\n\t attributes = E.get_attribute_names()\n\t\n\t #print attributes\n\t #print 'Number of population points', N\n\t\n\t # Calculate population impact\n\t count = 0\n\t building_impact = []\n\t for i in range(N):\n\t dep = float(depth[i].values()[0])\n\t\n\t # Tag and count\n\t if dep > 0.1:\n\t affected = 99.5\n\t count += 1\n\t else:\n\t affected = 0\n\t\n\t # Collect depth and calculated damage\n\t result_dict = {'AFFECTED': affected,\n\t 'DEPTH': dep}\n\t\n\t # Carry all original attributes forward\n\t for key in attributes:\n\t result_dict[key] = E.get_data(key, i)\n\t\n\t # Record result for this feature\n\t building_impact.append(result_dict)\n\t\n\t # Create report\n\t caption = ('<table border=\"0\" width=\"320px\">'\n\t ' <tr><th><b>%s</b></th><th><b>%s</b></th></th>'\n\t ' <tr></tr>'\n\t ' <tr><td>%s&#58;</td><td>%i</td></tr>'\n\t ' <tr><td>%s (> 10 cm) &#58;</td><td>%i</td></tr>'\n\t ' <tr><td>%s (< 10 cm) &#58;</td><td>%i</td></tr>'\n\t '</table>' % (_('Buildings'), _('Total'),\n\t _('All'), N,\n\t _('Inundated'), count,\n\t _('Not inundated'), N - count))\n\t\n\t # Create vector layer and return\n\t V = Vector(data=building_impact,\n\t projection=E.get_projection(),\n\t 
geometry=coordinates,\n\t name='Estimated buildings affected',\n\t keywords={'caption': caption})\n\t return V\n\t\n\t def generate_style(self, data):\n\t \"\"\"Generates and SLD file based on the data values\n\t \"\"\"\n\t\n\t DEFAULT_SYMBOL = 'circle'\n\t\n\t symbol_field = None\n\t symbol_keys = [None, '']\n\t symbol_values = [DEFAULT_SYMBOL, DEFAULT_SYMBOL]\n\t\n\t scale_keys = [10000000000, 10000000, 5000000, 1000000,\n\t 500000, 250000, 100000]\n\t scale_values = [5, 5, 5, 5, 5, 8, 14]\n\t\n\t class_keys = ['Not affected', 'Greater than 10 cm']\n\t class_values = [{'min': 0, 'max': 90,\n\t 'color': '#cccccc', 'opacity': '0.2'},\n\t {'min': 90, 'max': 100,\n\t 'color': '#F31a0c', 'opacity': '1'}]\n\t\n\t if self.symbol_field in data.get_attribute_names():\n\t symbol_field = self.symbol_field\n\t\n\t symbol_keys.extend(['Church/Mosque', 'Commercial (office)',\n\t 'Hotel',\n\t 'Medical facility', 'Other',\n\t 'Other industrial',\n\t 'Residential', 'Retail', 'School',\n\t 'Unknown', 'Warehouse'])\n\t\n\t symbol_values.extend([DEFAULT_SYMBOL, DEFAULT_SYMBOL,\n\t DEFAULT_SYMBOL,\n\t DEFAULT_SYMBOL, DEFAULT_SYMBOL,\n\t DEFAULT_SYMBOL,\n\t DEFAULT_SYMBOL, DEFAULT_SYMBOL,\n\t DEFAULT_SYMBOL,\n\t DEFAULT_SYMBOL, DEFAULT_SYMBOL])\n\t\n\t params = dict(name=data.get_name(),\n\t damage_field=self.target_field,\n\t symbol_field=symbol_field,\n\t symbols=dict(zip(symbol_keys, symbol_values)),\n\t scales=dict(zip(scale_keys, scale_values)),\n\t classifications=dict(zip(class_keys, class_values)))\n\t\n\t return render_to_string('impact/styles/point_classes.sld', params)\n\n\n[https://github.com/AIFDR/riab/blob/develop/docs/usage/plugins/development.rst]\n\n" }, { "alpha_fraction": 0.5562347173690796, "alphanum_fraction": 0.5701406002044678, "avg_line_length": 41.771240234375, "blob_id": "059e1f5b0629d67c9bcd2ac667957075065ecc20", "content_id": "bbe06f6c1541daa46227b1c3e85717856ea0e877", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ant Build System", "length_bytes": 6544, "license_type": "no_license", "max_line_length": 130, "num_lines": 153, "path": "/calculator/build.xml", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project name=\"calculator\" default=\"dist\" basedir=\".\">\n\n <description>\n Risiko Calculator Build File\n </description>\n\n <property name=\"build\" location=\"build\"/>\n <property name=\"externals\" location=\"app/static/externals/\"/>\n <property name=\"app.proxy.geoserver\" value=\"http://localhost:8001/geoserver-geonode-dev\"/>\n <property name=\"app.proxy.geonode\" value=\"http://localhost:8000\"/>\n <property name=\"app.port\" value=\"8080\"/>\n\n <target name=\"init\">\n\n <mkdir dir=\"${build}\"/>\n\n <echo message=\"pulling in ringo\"/>\n <get src=\"http://cloud.github.com/downloads/ringo/ringojs/ringojs-0.7.zip\" dest=\"${build}/ringojs-0.7.zip\"/>\n <unzip src=\"${build}/ringojs-0.7.zip\" dest=\"${build}\"/>\n <move todir=\"${build}/ringo\">\n <fileset dir=\"${build}/ringojs-0.7\"/>\n </move>\n <!-- uncomment this if you have additional jars in a lib dir\n <copy todir=\"${build}/ringo/lib\" flatten=\"true\" includeEmptyDirs=\"false\">\n <fileset dir=\"lib\"/>\n </copy>\n -->\n\n <echo message=\"pulling in buildkit (ignore fatal warning)\"/>\n <mkdir dir=\"${build}/buildkit\"/>\n <exec executable=\"git\" dir=\"${build}/buildkit\" failonerror=\"false\">\n <arg line=\"clone -nq git://github.com/tschaub/buildkit.git .\"/>\n </exec>\n <exec executable=\"git\" 
dir=\"${build}/buildkit\" failonerror=\"true\">\n <arg line=\"pull origin master\"/>\n </exec>\n <exec executable=\"git\" dir=\"${build}/buildkit\" failonerror=\"true\">\n <arg line=\"checkout 86eacbd7b3bf62b8e99f46911dd5a633464b8570\"/>\n </exec>\n <echo message=\"pulling in js dependencies\"/>\n <get src=\"https://github.com/openlayers/openlayers/zipball/master\" dest=\"${build}/openlayers.zip\"/>\n <unzip src=\"${build}/openlayers.zip\" dest=\"${externals}\"/>\n <exec executable=\"sh\" dir=\"${externals}\" failonerror=\"true\">\n <arg line=\"-c 'rm -Rf openlayers &amp;&amp; mv openlayers-openlayers-* openlayers'\"/>\n </exec>\n <get src=\"https://github.com/opengeo/GeoExt/zipball/deac1664bc9722098b171b0d550b5ba02e2abe2a\" dest=\"${build}/geoext.zip\"/>\n <unzip src=\"${build}/geoext.zip\" dest=\"${externals}\"/>\n <exec executable=\"sh\" dir=\"${externals}\" failonerror=\"true\">\n <arg line=\"-c 'rm -Rf geoext &amp;&amp; mv opengeo-GeoExt-* geoext'\"/>\n </exec>\n <get src=\"https://github.com/opengeo/gxp/zipball/5f0978297ca74d48bb6b5fc33bc3ca26cfda5aad\" dest=\"${build}/gxp.zip\"/>\n <unzip src=\"${build}/gxp.zip\" dest=\"${externals}\"/>\n <exec executable=\"sh\" dir=\"${externals}\" failonerror=\"true\">\n <arg line=\"-c 'rm -Rf gxp &amp;&amp; mv opengeo-gxp-* gxp'\"/>\n </exec>\n\n </target>\n \n <target name=\"buildjs\">\n\n <mkdir dir=\"${build}/${ant.project.name}/WEB-INF/app/static/script\"/>\n <java jar=\"${build}/ringo/run.jar\" fork=\"true\" failonerror=\"true\">\n <sysproperty key=\"ringo.home\" path=\"${build}/ringo\"/>\n <arg path=\"${build}/buildkit/lib/buildkit/build.js\"/>\n <arg line=\"-o ${build}/${ant.project.name}/WEB-INF/app/static/script buildjs.cfg\"/>\n </java>\n\n </target>\n \n <target name=\"dist\" depends=\"buildjs, redist, deploy\"/>\n \n <target name=\"redist\">\n\n <mkdir dir=\"${build}/${ant.project.name}\"/>\n \n <copy todir=\"${build}/${ant.project.name}\">\n <fileset dir=\"src/main/webapp/\">\n </fileset>\n </copy>\n <copy todir=\"${build}/${ant.project.name}/WEB-INF/lib\" flatten=\"true\" includeEmptyDirs=\"false\">\n <fileset dir=\"${build}/ringo/lib\"/>\n </copy>\n <copy todir=\"${build}/${ant.project.name}/WEB-INF/app\">\n <fileset dir=\"app\">\n <exclude name=\"**/externals/**\"/>\n <exclude name=\"**/script/**\"/>\n </fileset>\n </copy>\n <!-- copy Ext resources -->\n <copy todir=\"${build}/${ant.project.name}/WEB-INF/app/static/externals/ext\">\n <fileset dir=\"app/static/externals/ext\"/>\n </copy>\n <!-- copy OpenLayers resources -->\n <copy todir=\"${build}/${ant.project.name}/WEB-INF/app/static/externals/openlayers/theme\">\n <fileset dir=\"app/static/externals/openlayers/theme\"/>\n </copy>\n <!-- copy GeoExt resources -->\n <copy todir=\"${build}/${ant.project.name}/WEB-INF/app/static/externals/geoext/resources\">\n <fileset dir=\"app/static/externals/geoext/resources\"/>\n </copy>\n <!-- copy gxp resources -->\n <copy todir=\"${build}/${ant.project.name}/WEB-INF/app/static/externals/gxp/src/theme\">\n <fileset dir=\"app/static/externals/gxp/src/theme\"/>\n </copy>\n </target>\n\n <target name=\"deploy\">\n <mkdir dir=\"${build}/../../../impact/static/\"/>\n <delete dir=\"${build}../../../impact/static/calculator\"/>\n <mkdir dir=\"${build}/../../../impact/static/calculator\"/>\n <copy todir=\"${build}../../../impact/static/calculator\">\n <fileset dir=\"${build}/${ant.project.name}/WEB-INF/app/static\"/>\n </copy>\n </target>\n\n <target name=\"debug\">\n <java jar=\"${build}/ringo/run.jar\" fork=\"true\">\n <sysproperty 
key=\"app.proxy.geoserver\" value=\"${app.proxy.geoserver}\"/>\n <sysproperty key=\"app.proxy.geonode\" value=\"${app.proxy.geonode}\"/>\n <sysproperty key=\"ringo.home\" path=\"${build}/ringo\"/>\n <arg path=\"app/main.js\"/>\n <arg line=\"-p ${app.port}\"/>\n </java>\n </target>\n\n <target name=\"debugall\">\n <java jar=\"${build}/ringo/run.jar\" fork=\"true\">\n <sysproperty key=\"app.proxy.geoserver\" value=\"${app.proxy.geoserver}\"/>\n <sysproperty key=\"ringo.home\" path=\"${build}/ringo\"/>\n <arg value=\"-d\"/>\n <arg path=\"app/main.js\"/>\n <arg line=\"-p ${app.port}\"/>\n </java>\n </target>\n\n <target name=\"static-war\" depends=\"dist\">\n <war destfile=\"${build}/${ant.project.name}.war\" needxmlfile=\"false\">\n <fileset dir=\"${build}/${ant.project.name}/WEB-INF/app/static\"/>\n </war>\n </target>\n\n <target name=\"war\" depends=\"dist\">\n <war destfile=\"${build}/${ant.project.name}.war\">\n <fileset dir=\"${build}/${ant.project.name}\"/>\n </war>\n </target>\n\n <target name=\"clean\" description=\"remove previous build and all dependencies\">\n <delete dir=\"${build}\"/>\n </target>\n\n</project>\n" }, { "alpha_fraction": 0.5190014839172363, "alphanum_fraction": 0.5335320234298706, "avg_line_length": 32.54999923706055, "blob_id": "49cf3c20ba97ca3cb1b9932684fee1042fb34db3", "content_id": "a288997020eb163676a78ab3d85408a5f187f474", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2684, "license_type": "no_license", "max_line_length": 78, "num_lines": 80, "path": "/impact/plugins/earthquake/population_exposure.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "from impact.plugins.core import FunctionProvider\nfrom impact.storage.raster import Raster\nimport numpy\n\n\nclass EarthquakePopulationExposureFunction(FunctionProvider):\n \"\"\"Population Exposure to ground shaking\n\n :author Ole Nielsen\n :rating 3\n :param requires category=='hazard' and \\\n subcategory.startswith('earthquake') and \\\n layer_type=='raster'\n :param requires category=='exposure' and \\\n subcategory.startswith('population') and \\\n layer_type=='raster'\n \"\"\"\n\n @staticmethod\n def run(layers):\n \"\"\"Calculate population exposed to different levels of ground shaking\n\n Input\n layers: List of layers expected to contain\n H: Raster layer of MMI ground shaking\n P: Raster layer of population density\n \"\"\"\n\n # Identify input layers\n intensity = layers[0]\n population = layers[1]\n\n # Extract data\n H = intensity.get_data(nan=0)\n P = population.get_data(nan=0)\n\n # Calculate exposure to MMI impact\n mmi_classes = range(1, 11) # MMI classes considered (1-10)\n\n # Form result as keyword strings\n mmi_str = str(mmi_classes)[1:-1] # Get rid of []\n count_str = ''\n\n for i in mmi_classes:\n # Identify cells where MMI is in class i\n mask = (H >= i - 0.5) * (H < i + 0.5)\n\n # Count population affected by this shake level\n count = round(numpy.nansum(P[mask]))\n if numpy.isnan(count):\n count = 0\n\n # Update keyword string\n count_str += '%i ' % count\n\n # Calculate fatality map (FIXME (Ole): Need to replaced by USGS model)\n a = 0.97429\n b = 11.037\n F = 10 ** (a * H - b) * P\n\n # Generate text with result for this study\n count = numpy.nansum(F.flat)\n total = numpy.nansum(P.flat)\n\n # Create report\n caption = ('<table border=\"0\" width=\"320px\">'\n ' <tr><td>%s&#58;</td><td>%i</td></tr>'\n ' <tr><td>%s&#58;</td><td>%i</td></tr>'\n '</table>' % ('Jumlah Penduduk', int(total),\n 'Perkiraan Orang 
Meninggal', int(count)))\n\n # Create new layer and return\n R = Raster(F,\n projection=population.get_projection(),\n geotransform=population.get_geotransform(),\n name='Estimated fatalities',\n keywords={'caption': caption,\n 'mmi-classes': mmi_str,\n 'affected-population': count_str})\n return R\n" }, { "alpha_fraction": 0.6302729249000549, "alphanum_fraction": 0.6526054739952087, "avg_line_length": 25.733333587646484, "blob_id": "f03be72eaaf3355cfcc7d2a9c4d0f041f3a043b3", "content_id": "05fa3145d9a43a4178155eeaf0d2478af1cf5f65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 403, "license_type": "no_license", "max_line_length": 73, "num_lines": 15, "path": "/docs/intro/faq.rst", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "Frequently Asked Questions\n==========================\n\n\n\nHow do I rename a shape file and all the helper files?\n::\n Use the rename command. rename [ -v ] [ -n ] [ -f ] perlexpr [ files ].\n For example\n rename -v 's/^building/OSM_building_polygons_20110905/' building.*\n\n\nMy Risiko production server is live but no map shows?\n::\n Try to login and restart tomcat: sudo /etc/init.d/tomcat6 restart\n\n\n" }, { "alpha_fraction": 0.4772461950778961, "alphanum_fraction": 0.496693879365921, "avg_line_length": 36.5328483581543, "blob_id": "188cf6cbbfd891879c19a06b7e242f09c2c54e67", "content_id": "df8f30be6171a26ef88caf495c0e2695d3cc9ec4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5142, "license_type": "no_license", "max_line_length": 74, "num_lines": 137, "path": "/impact/plugins/flood/flood_building_impact.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "from django.template.loader import render_to_string\nfrom impact.plugins.core import FunctionProvider\nfrom impact.storage.vector import Vector\nfrom django.utils.translation import ugettext as _\nfrom impact.plugins.utilities import PointZoomSize\nfrom impact.plugins.utilities import PointClassColor\nfrom impact.plugins.utilities import PointSymbol\nimport scipy.stats\n\n\nclass FloodBuildingImpactFunction(FunctionProvider):\n \"\"\"Risk plugin for flood impact on building data\n\n :param requires category=='hazard' and \\\n subcategory.startswith('flood') and \\\n layer_type=='raster' and \\\n unit=='m'\n :param requires category=='exposure' and \\\n subcategory.startswith('building')\n \"\"\"\n\n target_field = 'AFFECTED'\n\n def run(self, layers):\n \"\"\"Risk plugin for tsunami population\n \"\"\"\n\n # Extract data\n # FIXME (Ole): This will be replaced by a helper function\n # to separate hazard from exposure using keywords\n H = layers[0] # Depth\n E = layers[1] # Building locations\n\n # Interpolate hazard level to building locations\n H = H.interpolate(E)\n\n # Extract relevant numerical data\n coordinates = E.get_geometry()\n depth = H.get_data()\n N = len(depth)\n\n # List attributes to carry forward to result layer\n attributes = E.get_attribute_names()\n\n #print attributes\n #print 'Number of population points', N\n\n # Calculate population impact\n count = 0\n building_impact = []\n for i in range(N):\n dep = float(depth[i].values()[0])\n\n # Tag and count\n if dep > 0.1:\n affected = 99.5\n count += 1\n else:\n affected = 0\n\n # Collect depth and calculated damage\n result_dict = {'AFFECTED': affected,\n 'DEPTH': dep}\n\n # Carry all original attributes forward\n for key in attributes:\n result_dict[key] = E.get_data(key, i)\n\n # 
Record result for this feature\n building_impact.append(result_dict)\n\n # Create report\n caption = ('<table border=\"0\" width=\"320px\">'\n ' <tr><th><b>%s</b></th><th><b>%s</b></th></th>'\n ' <tr></tr>'\n ' <tr><td>%s&#58;</td><td>%i</td></tr>'\n ' <tr><td>%s (> 10 cm) &#58;</td><td>%i</td></tr>'\n ' <tr><td>%s (< 10 cm) &#58;</td><td>%i</td></tr>'\n '</table>' % (_('Buildings'), _('Total'),\n _('All'), N,\n _('Inundated'), count,\n _('Not inundated'), N - count))\n\n # Create vector layer and return\n V = Vector(data=building_impact,\n projection=E.get_projection(),\n geometry=coordinates,\n name='Estimated buildings affected',\n keywords={'caption': caption})\n return V\n\n def generate_style(self, data):\n \"\"\"Generates and SLD file based on the data values\n \"\"\"\n\n DEFAULT_SYMBOL = 'circle'\n\n symbol_field = None\n symbol_keys = [None, '']\n symbol_values = [DEFAULT_SYMBOL, DEFAULT_SYMBOL]\n\n scale_keys = [10000000000, 10000000, 5000000, 1000000,\n 500000, 250000, 100000]\n scale_values = [5, 5, 5, 5, 5, 8, 14]\n\n class_keys = ['Not affected', 'Greater than 10 cm']\n class_values = [{'min': 0, 'max': 90,\n 'color': '#cccccc', 'opacity': '0.2'},\n {'min': 90, 'max': 100,\n 'color': '#F31a0c', 'opacity': '1'}]\n\n if self.symbol_field in data.get_attribute_names():\n symbol_field = self.symbol_field\n\n symbol_keys.extend(['Church/Mosque', 'Commercial (office)',\n 'Hotel',\n 'Medical facility', 'Other',\n 'Other industrial',\n 'Residential', 'Retail', 'School',\n 'Unknown', 'Warehouse'])\n\n symbol_values.extend([DEFAULT_SYMBOL, DEFAULT_SYMBOL,\n DEFAULT_SYMBOL,\n DEFAULT_SYMBOL, DEFAULT_SYMBOL,\n DEFAULT_SYMBOL,\n DEFAULT_SYMBOL, DEFAULT_SYMBOL,\n DEFAULT_SYMBOL,\n DEFAULT_SYMBOL, DEFAULT_SYMBOL])\n\n params = dict(name=data.get_name(),\n damage_field=self.target_field,\n symbol_field=symbol_field,\n symbols=dict(zip(symbol_keys, symbol_values)),\n scales=dict(zip(scale_keys, scale_values)),\n classifications=dict(zip(class_keys, class_values)))\n\n return render_to_string('impact/styles/point_classes.sld', params)\n" }, { "alpha_fraction": 0.6049073338508606, "alphanum_fraction": 0.6149224042892456, "avg_line_length": 31.91758155822754, "blob_id": "02fbe7b137ddeabe336d9839a267a5776d08f8c1", "content_id": "7b1ac07973b3b8744edc2850a5638245368a280c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5991, "license_type": "no_license", "max_line_length": 104, "num_lines": 182, "path": "/setup.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#from distutils.core import setup, Command\nfrom setuptools import setup, Command\nfrom distutils.command.install_data import install_data\nfrom distutils.command.install import INSTALL_SCHEMES\nimport os\nimport sys\nimport codecs\n\n\nclass osx_install_data(install_data):\n # On MacOS, the platform-specific lib dir is\n # /System/Library/Framework/Python/.../\n # which is wrong. Python 2.5 supplied with MacOS 10.5 has an\n # Apple-specific fix\n # for this in distutils.command.install_data#306. It fixes install_lib\n # but not\n # install_data, which is why we roll our own install_data class.\n\n def finalize_options(self):\n # By the time finalize_options is called, install.install_lib is\n # set to the\n # fixed directory, so we set the installdir to install_lib. 
The\n # install_data class uses ('install_data', 'install_dir') instead.\n self.set_undefined_options('install', ('install_lib', 'install_dir'))\n install_data.finalize_options(self)\n\nif sys.platform == 'darwin':\n cmdclasses = {'install_data': osx_install_data}\nelse:\n cmdclasses = {'install_data': install_data}\n\n\ndef fullsplit(path, result=None):\n \"\"\"\n Split a pathname into components (the opposite of os.path.join) in a\n platform-neutral way.\n \"\"\"\n if result is None:\n result = []\n head, tail = os.path.split(path)\n if head == '':\n return [tail] + result\n if head == path:\n return result\n return fullsplit(head, [tail] + result)\n\n\n# Tell distutils to put the data_files in platform-specific installation\n# locations. See here for an explanation:\n# http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb\nfor scheme in INSTALL_SCHEMES.values():\n scheme['data'] = scheme['purelib']\n\n# Compile the list of packages available, because distutils doesn't have\n# an easy way to do this.\npackages, data_files = [], []\nroot_dir = os.path.dirname(__file__)\nif root_dir != '':\n os.chdir(root_dir)\n\ndef add_dir(source_dir):\n for dirpath, dirnames, filenames in os.walk(source_dir):\n # Ignore dirnames that start with '.'\n for i, dirname in enumerate(dirnames):\n if dirname.startswith('.'): del dirnames[i]\n if '__init__.py' in filenames:\n packages.append('.'.join(fullsplit(dirpath)))\n elif filenames:\n data_files.append([dirpath,\n [os.path.join(dirpath, f) for f in filenames]])\n\nadd_dir('risiko')\nadd_dir('impact')\n\n# Small hack for working with bdist_wininst.\n# See http://mail.python.org/pipermail/distutils-sig/2004-August/004134.html\nif len(sys.argv) > 1 and sys.argv[1] == 'bdist_wininst':\n for file_info in data_files:\n file_info[0] = '\\\\PURELIB\\\\%s' % file_info[0]\n\n# Dynamically calculate the version based on risiko.VERSION.\ndistmeta = __import__('risiko')\n\nclass RunTests(Command):\n \"\"\"Overall test runner for the project\n \"\"\"\n\n description = 'Run the django test suite from the testproj dir.'\n\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n this_dir = os.getcwd()\n testproj_dir = os.path.join(this_dir, 'risiko')\n os.chdir(testproj_dir)\n sys.path.append(testproj_dir)\n from django.core.management import execute_manager\n os.environ['DJANGO_SETTINGS_MODULE'] = os.environ.get(\n 'DJANGO_SETTINGS_MODULE', 'settings')\n settings_file = os.environ['DJANGO_SETTINGS_MODULE']\n settings_mod = __import__(settings_file, {}, {}, [''])\n execute_manager(settings_mod, argv=[\n __file__, 'test'])\n os.chdir(this_dir)\n\n\nif os.path.exists('README.rst'):\n long_description = codecs.open('README.rst', 'r', 'utf-8').read()\nelse:\n long_description = 'See http://github.com/AIFDR/riab'\n\n\nsetup(\n name='risiko',\n version=distmeta.__version__,\n description=distmeta.__doc__,\n author=distmeta.__author__,\n author_email=distmeta.__contact__,\n url=distmeta.__homepage__,\n platforms=['any'],\n license=distmeta.__license__,\n packages=packages,\n data_files=data_files,\n #zip_safe=False,\n# install_requires=[\n # For the core, installed with apt-get\n # 'numpy', 'scipy', 'gdal',\n\n # for the web api\n# 'Django==1.3',\n\n # for the storage\n# 'owslib',\n\n # for the documentation\n# 'sphinx',\n\n # for testing\n# 'nose>=0.11', 'coverage>=3.4', 'unittest2>=0.4.0',\n# 'nose-cover3', 'mock', 'django-nose',\n\n # for improving source code 
quality\n# 'pylint', 'pep8'],\n cmdclass = {'test': RunTests},\n scripts = ['scripts/risiko',\n 'scripts/risiko-test',\n 'scripts/risiko-start',\n 'scripts/risiko-stop',\n 'scripts/risiko-clean',\n 'scripts/risiko-lint',\n 'scripts/risiko-upload',\n 'scripts/risiko-uninstall',\n 'scripts/risiko-js',\n ],\n classifiers = [\n 'Development Status :: 2 - Pre-Alpha',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GPL License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.5',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n 'Topic :: Internet :: WWW/HTTP :: WSGI',\n 'Topic :: Scientific/Engineering :: GIS',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n long_description=long_description,\n)\n" }, { "alpha_fraction": 0.39148351550102234, "alphanum_fraction": 0.4352433383464813, "avg_line_length": 37.029850006103516, "blob_id": "5851c4bda54464bd0b3f043b3b25ca2e3e61fa2a", "content_id": "45fec1bfa8d0c7797060af44d3a47fc54aa6967f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5096, "license_type": "no_license", "max_line_length": 80, "num_lines": 134, "path": "/impact/plugins/tephra/general_ashload_impact.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "from django.template.loader import render_to_string\nfrom impact.plugins.core import FunctionProvider\nfrom impact.storage.vector import Vector\n\n# FIXME: Need style for this and allow the name to\n# be different from Percen_da\n\n\nclass TephraImpactFunction(FunctionProvider):\n \"\"\"Risk plugin for tephra damage (FIXME: Origin?)\n\n :param requires category=='hazard' and \\\n subcategory.startswith('tephra') and \\\n layer_type=='raster'\n :param requires category=='exposure' and \\\n subcategory.startswith('building') and \\\n layer_type=='vector'\n \"\"\"\n\n @staticmethod\n def run(layers):\n \"\"\"Risk plugin for tephra impact\n \"\"\"\n\n # Extract data\n H = layers[0] # Ash load\n E = layers[1] # Building locations\n\n # Interpolate hazard level to building locations\n H = H.interpolate(E, 'load')\n\n # Calculate building damage\n count3 = 0\n count2 = 0\n count1 = 0\n count0 = 0\n result = []\n for i in range(len(E)):\n\n #-------------------\n # Extract parameters\n #-------------------\n load = H.get_data('load', i)\n\n #------------------------\n # Compute damage level\n #------------------------\n\n # FIXME: The thresholds have been greatly reduced\n # for the purpose of demonstration. 
Any real analyis\n # should bring them back to 0, 90, 150, 300\n if 0.01 <= load < 0.5:\n # Loss of crops and livestock\n impact = 0\n count0 += 1\n elif 0.5 <= load < 2.0:\n # Cosmetic damage\n impact = 1\n count1 += 1\n elif 2.0 <= load < 10.0:\n # Partial building collapse\n impact = 2\n count2 += 1\n elif load >= 10.0:\n # Complete building collapse\n impact = 3\n count3 += 1\n else:\n impact = 0\n count0 += 1\n\n result.append({'DAMAGE': impact, 'ASHLOAD': load})\n\n # Create report\n caption = ('<font size=\"3\"> <table border=\"0\" width=\"320px\">'\n ' <tr><th><b>%s</b></th><th><b>%s</b></th></th>'\n ' <tr></tr>'\n ' <tr><td>%s&#58;</td><td>%i</td></tr>'\n ' <tr><td>%s&#58;</td><td>%i</td></tr>'\n ' <tr><td>%s&#58;</td><td>%i</td></tr>'\n ' <tr><td>%s&#58;</td><td>%i</td></tr>'\n '</table></font>' % ('Beban abu', 'Gedung dampak',\n '< 0.5 kg/m2', count0,\n '0.5 - 2 kg/m2', count1,\n '2 - 10 kg/m2', count2,\n '> 10 kg/m2', count3))\n #'</table>' % ('Beban abu', 'Gedung dampak',\n # 'Gangguan (< 90 kg/m2)', count0,\n # 'Kerusakan kosmetik (90 - 150 kg/m2', count1,\n # 'parsial runtuhnya (150 - 300 kg/m2', count2,\n # 'runtuhnya lengkap (> 300 kg/m2', count3))\n\n V = Vector(data=result,\n projection=E.get_projection(),\n geometry=E.get_geometry(),\n name='Estimated ashload damage',\n keywords={'caption': caption})\n return V\n\n def generate_style(self, data):\n \"\"\"Generates and SLD file based on the data values\n \"\"\"\n\n DEFAULT_SYMBOL = 'square'\n\n symbol_field = None\n symbol_keys = [None, '']\n symbol_values = [DEFAULT_SYMBOL, DEFAULT_SYMBOL]\n\n # Zoom levels (large number means close up)\n scale_keys = [50000000000, 10000000000, 10000000, 5000000,\n 1000000, 500000, 250000, 100000]\n scale_values = [2, 4, 6, 8, 1, 1, 1, 1]\n\n # Predefined colour classes\n class_keys = ['< 90 kg/m2', '90 - 150 kg/m2',\n '150 - 300 kg/m2', '> 300 kg/m2']\n class_values = [{'min': -0.5, 'max': 0.5,\n 'color': '#cccccc', 'opacity': '1'},\n {'min': 0.5, 'max': 1.5,\n 'color': '#0EEC6C', 'opacity': '1'},\n {'min': 1.5, 'max': 2.5,\n 'color': '#FD8D3C', 'opacity': '1'},\n {'min': 2.5, 'max': 3.5,\n 'color': '#F31A1C', 'opacity': '1'}]\n\n params = dict(name=data.get_name(),\n damage_field=self.target_field,\n symbol_field=symbol_field,\n symbols=dict(zip(symbol_keys, symbol_values)),\n scales=dict(zip(scale_keys, scale_values)),\n classifications=dict(zip(class_keys, class_values)))\n\n return render_to_string('impact/styles/point_classes.sld', params)\n" }, { "alpha_fraction": 0.5412269234657288, "alphanum_fraction": 0.5586223006248474, "avg_line_length": 34.485595703125, "blob_id": "f81b5c96e70384590e33add599d20eb2e54f7979", "content_id": "910384bed5d4b273a2b207b75b3dd6c03a8e5233", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8623, "license_type": "no_license", "max_line_length": 195, "num_lines": 243, "path": "/impact/plugins/earthquake/fatality_model_podes.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "\"\"\"Fatality model using BNPB Podes data.\n\nThis was obtained from the WFS server\n\nhttp://gisserver.bnpb.go.id:8399/arcgis/rest/services\nhttp://gisserver.bnpb.go.id:8399/arcgis/rest/services/demografi\nhttp://gisserver.bnpb.go.id:8399/arcgis/rest/services/demografi/Populasi_Penduduk_Kecamatan/FeatureServer?f=pjson\n\nusing wget, followed by processing the gml to swap coordinates using the script\nhttps://github.com/AIFDR/riab/blob/develop/extras/swap_gml_coords.py\n\nFinally, it was converted to the .shp 
format.\n\nEventually, this will be scripted and eventually work directly with the WFS\n- see https://github.com/AIFDR/riab/issues/62\n\"\"\"\n\nimport numpy\nfrom impact.plugins.core import FunctionProvider\nfrom impact.storage.raster import Raster\nfrom impact.storage.vector import Vector\n\n\nclass EarthquakeFatalityFunctionPodes(FunctionProvider):\n \"\"\"Risk plugin for earthquake fatalities using Podes polygon data\n\n :author Allen\n :rating 1\n :param requires category=='hazard' and \\\n subcategory.startswith('earthquake') and \\\n layer_type=='raster'\n :param requires category=='exposure' and \\\n subcategory.startswith('population') and \\\n layer_type=='vector' and \\\n geometry=='polygon'\n \"\"\"\n\n target_field = 'FATALITIES'\n\n def run(self, layers,\n a=0.97429, b=11.037):\n \"\"\"Risk plugin for earthquake fatalities\n\n Input\n layers: List of layers expected to contain\n H: Raster layer of MMI ground shaking\n E: Polygon population data\n a: Parameter for Allen impact function\n b: Parameter for Allen impact function\n \"\"\"\n\n # Identify input layers\n H = layers[0] # Intensity\n E = layers[1] # Exposure - population counts\n\n # Interpolate hazard level to building locations\n H = H.interpolate(E)\n\n # Extract relevant numerical data\n coordinates = E.get_geometry() # Stay with polygons\n shaking = H.get_data()\n N = len(shaking)\n\n # List attributes to carry forward to result layer\n attributes = E.get_attribute_names()\n\n # Calculate fatilities\n count = 0\n total = 0\n\n result_feature_set = []\n for i in range(N):\n mmi = float(shaking[i].values()[0])\n if mmi < 0.0:\n # FIXME: Hack until interpolation is fixed\n mmi = 0.0\n\n population_count = E.get_data('Jumlah_Pen', i)\n\n # Calculate impact\n F = 10 ** (a * mmi - b) * population_count\n\n # Collect shake level and calculated damage\n result_dict = {self.target_field: F,\n 'MMI': mmi}\n\n # Carry all orginal attributes forward\n for key in attributes:\n result_dict[key] = E.get_data(key, i)\n\n # Record result for this feature\n result_feature_set.append(result_dict)\n\n # Calculate statistics\n if not numpy.isnan(F):\n count += F\n total += population_count\n\n # Create report\n caption = ('<table border=\"0\" width=\"320px\">'\n ' <tr><td>%s&#58;</td><td>%i</td></tr>'\n ' <tr><td>%s&#58;</td><td>%i</td></tr>'\n '</table>' % ('Jumlah Penduduk', int(total),\n 'Perkiraan Orang Meninggal', int(count)))\n\n # Create vector layer and return\n V = Vector(data=result_feature_set,\n projection=E.get_projection(),\n geometry=coordinates,\n name='Estimated fatalities',\n keywords={'caption': caption})\n\n return V\n\n def generate_style(self, data):\n \"\"\"Generates a polygon SLD file based on the data values\n \"\"\"\n\n # FIXME (Ole): Return static style to start with: ticket #144\n style = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<sld:StyledLayerDescriptor xmlns=\"http://www.opengis.net/sld\" xmlns:sld=\"http://www.opengis.net/sld\" xmlns:ogc=\"http://www.opengis.net/ogc\" xmlns:gml=\"http://www.opengis.net/gml\" version=\"1.0.0\">\n <sld:NamedLayer>\n <sld:Name>podes_sub_district</sld:Name>\n <sld:UserStyle>\n <sld:Name>podes_sub_district</sld:Name>\n <sld:Title/>\n <sld:FeatureTypeStyle>\n <sld:Name>name</sld:Name>\n <sld:Rule>\n <sld:Name>2</sld:Name>\n <sld:Title>0 to 2.0</sld:Title>\n <ogc:Filter>\n <ogc:PropertyIsLessThan>\n <ogc:PropertyName>FATALITIES</ogc:PropertyName>\n <ogc:Literal>2.0</ogc:Literal>\n </ogc:PropertyIsLessThan>\n </ogc:Filter>\n <sld:PolygonSymbolizer>\n <sld:Fill>\n 
<sld:CssParameter name=\"fill\">#FFFFBE</sld:CssParameter>\n </sld:Fill>\n <sld:Stroke>\n <sld:CssParameter name=\"stroke\">#000000</sld:CssParameter>\n </sld:Stroke>\n </sld:PolygonSymbolizer>\n </sld:Rule>\n <sld:Rule>\n <sld:Name>10</sld:Name>\n <sld:Title>2.1 to 10</sld:Title>\n <ogc:Filter>\n <ogc:And>\n <ogc:PropertyIsGreaterThanOrEqualTo>\n <ogc:PropertyName>FATALITIES</ogc:PropertyName>\n <ogc:Literal>2.0</ogc:Literal>\n </ogc:PropertyIsGreaterThanOrEqualTo>\n <ogc:PropertyIsLessThan>\n <ogc:PropertyName>FATALITIES</ogc:PropertyName>\n <ogc:Literal>10</ogc:Literal>\n </ogc:PropertyIsLessThan>\n </ogc:And>\n </ogc:Filter>\n <sld:PolygonSymbolizer>\n <sld:Fill>\n <sld:CssParameter name=\"fill\">#F5B800</sld:CssParameter>\n </sld:Fill>\n <sld:Stroke>\n <sld:CssParameter name=\"stroke\">#000000</sld:CssParameter>\n </sld:Stroke>\n </sld:PolygonSymbolizer>\n </sld:Rule>\n <sld:Rule>\n <sld:Name>25</sld:Name>\n <sld:Title>10.1 to 25</sld:Title>\n <ogc:Filter>\n <ogc:And>\n <ogc:PropertyIsGreaterThanOrEqualTo>\n <ogc:PropertyName>FATALITIES</ogc:PropertyName>\n <ogc:Literal>10.0</ogc:Literal>\n </ogc:PropertyIsGreaterThanOrEqualTo>\n <ogc:PropertyIsLessThan>\n <ogc:PropertyName>FATALITIES</ogc:PropertyName>\n <ogc:Literal>25</ogc:Literal>\n </ogc:PropertyIsLessThan>\n </ogc:And>\n </ogc:Filter>\n <sld:PolygonSymbolizer>\n <sld:Fill>\n <sld:CssParameter name=\"fill\">#F57A00</sld:CssParameter>\n </sld:Fill>\n <sld:Stroke>\n <sld:CssParameter name=\"stroke\">#000000</sld:CssParameter>\n </sld:Stroke>\n </sld:PolygonSymbolizer>\n </sld:Rule>\n <sld:Rule>\n <sld:Name>50</sld:Name>\n <sld:Title>25.1 to 50</sld:Title>\n <ogc:Filter>\n <ogc:And>\n <ogc:PropertyIsGreaterThanOrEqualTo>\n <ogc:PropertyName>FATALITIES</ogc:PropertyName>\n <ogc:Literal>25.0</ogc:Literal>\n </ogc:PropertyIsGreaterThanOrEqualTo>\n <ogc:PropertyIsLessThan>\n <ogc:PropertyName>FATALITIES</ogc:PropertyName>\n <ogc:Literal>50</ogc:Literal>\n </ogc:PropertyIsLessThan>\n </ogc:And>\n </ogc:Filter>\n <sld:PolygonSymbolizer>\n <sld:Fill>\n <sld:CssParameter name=\"fill\">#F53D00</sld:CssParameter>\n </sld:Fill>\n <sld:Stroke>\n <sld:CssParameter name=\"stroke\">#000000</sld:CssParameter>\n </sld:Stroke>\n </sld:PolygonSymbolizer>\n </sld:Rule>\n <sld:Rule>\n <sld:Name>50</sld:Name>\n <sld:Title>2.1 to 10</sld:Title>\n <ogc:Filter>\n <ogc:PropertyIsGreaterThanOrEqualTo>\n <ogc:PropertyName>FATALITIES</ogc:PropertyName>\n <ogc:Literal>50.0</ogc:Literal>\n </ogc:PropertyIsGreaterThanOrEqualTo>\n </ogc:Filter>\n <sld:PolygonSymbolizer>\n <sld:Fill>\n <sld:CssParameter name=\"fill\">#A80000</sld:CssParameter>\n </sld:Fill>\n <sld:Stroke>\n <sld:CssParameter name=\"stroke\">#000000</sld:CssParameter>\n </sld:Stroke>\n </sld:PolygonSymbolizer>\n </sld:Rule>\n </sld:FeatureTypeStyle>\n </sld:UserStyle>\n </sld:NamedLayer>\n</sld:StyledLayerDescriptor>\n\"\"\"\n\n return style\n" }, { "alpha_fraction": 0.5732728242874146, "alphanum_fraction": 0.5772062540054321, "avg_line_length": 27.491378784179688, "blob_id": "5f6a2e97bb353197bd1d12035155a43c75fb49b0", "content_id": "ae0652f16a16c20e829803b7e54fad8fab05de13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9915, "license_type": "no_license", "max_line_length": 95, "num_lines": 348, "path": "/risiko/settings.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "# Django settings for GeoNode project.\nimport os\nimport geonode\n\nDEBUG = True\nSITENAME = 'Risk In A Box'\nSITEURL = 
'http://localhost:8000/'\nTEMPLATE_DEBUG = DEBUG\n\nPROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))\nGEONODE_ROOT = os.path.dirname(geonode.__file__)\n\nADMINS = (\n # ('Your Name', '[email protected]'),\n)\n\nMANAGERS = ADMINS\n\nDATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(PROJECT_ROOT, 'development.db'),\n 'TEST_NAME': os.path.join(PROJECT_ROOT,\n 'development.db')}}\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = 'America/Chicago'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en'\n\nLANGUAGES = (\n ('id', 'Bahasa Indonesia'),\n ('en', 'English'),\n ('es', 'Espanol'),\n ('fr', 'Francais'),\n ('it', 'Italian'),\n)\n\nSITE_ID = 1\n\n# Setting a custom test runner to avoid running the tests for\n# some problematic 3rd party apps\nTEST_RUNNER = 'django_nose.NoseTestSuiteRunner'\n\nNOSE_ARGS = [\n# '--failed',\n '--stop',\n '--verbosity=2',\n '--with-doctest',\n '--nocapture',\n '--with-coverage',\n '--cover-package=risiko,impact',\n '--cover-inclusive',\n '--cover-tests',\n '--cover-erase',\n '--detailed-errors',\n# '--with-xunit',\n# '--with-pdb',\n ]\n\n#COVERAGE_EXCLUDE_MODULES = ('geonode',)\n\n#NOSE_PLUGINS = [\n# ]\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# Absolute path to the directory that holds media.\n# Example: '/home/media/media.lawrence.com/'\nMEDIA_ROOT = os.path.join(PROJECT_ROOT, 'static', 'uploaded')\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash if there is a path component (optional in other cases).\n# Examples: 'http://media.lawrence.com', 'http://example.com/media/'\nMEDIA_URL = '/static/uploaded/'\n\n# Absolute path to the directory that holds static files like app media.\n# Example: '/home/media/media.lawrence.com/apps/'\nSTATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')\n\n# URL that handles the static files like app media.\n# Example: 'http://media.lawrence.com'\nSTATIC_URL = '/static/'\n\n\nGEONODE_CLIENT_LOCATION = os.path.join(STATIC_URL, \"geonode/\")\n\n# Additional directories which hold static files\nSTATICFILES_DIRS = [\n os.path.join(PROJECT_ROOT, 'media'),\n os.path.join(GEONODE_ROOT, \"media\"),\n]\n\nGEONODE_UPLOAD_PATH = MEDIA_ROOT\n\n# URL prefix for admin media -- CSS, JavaScript and images. 
Make sure to use a\n# trailing slash.\n# Examples: 'http://foo.com/media/', '/media/'.\nADMIN_MEDIA_PREFIX = os.path.join(STATIC_URL, 'admin/')\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 'myv-y4#7j-d*p-__@j#*3z@!y24fz8%^z2v6atuy4bo9vqr1_a'\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.core.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'geonode.maps.context_processors.resource_urls',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n)\n\n# This isn't required for running the geonode site,\n# but it when running sites that inherit the\n# geonode.settings module.\nLOCALE_PATHS = (\n os.path.join(PROJECT_ROOT, 'locale'),\n os.path.join(GEONODE_ROOT, 'locale'),\n os.path.join(GEONODE_ROOT, 'maps', 'locale'),\n)\n\nROOT_URLCONF = 'risiko.urls'\n\n# Note that Django automatically includes the 'templates' dir in all the\n# INSTALLED_APPS, se there is no need to add maps/templates or admin/templates\nTEMPLATE_DIRS = (\n os.path.join(PROJECT_ROOT, 'templates'),\n os.path.join(GEONODE_ROOT, 'templates'),\n)\n\n# The FULLY QUALIFIED url to the GeoServer instance for this GeoNode.\nGEOSERVER_BASE_URL = 'http://localhost:8001/geoserver-geonode-dev/'\n\n# The username and password for a user that can add and edit layer\n# details on GeoServer\nGEOSERVER_CREDENTIALS = 'foo', 'bar'\n\nAUTHENTICATION_BACKENDS = ('geonode.core.auth.GranularBackend',)\n\nGOOGLE_API_KEY = ('ABQIAAAAkofooZxTfcCv9Wi3zzGTVxTnme5EwnLVtEDGnh-'\n 'lFVzRJhbdQhQgAhB1eT_2muZtc0dl-ZSWrtzmrw')\nLOGIN_REDIRECT_URL = '/'\n\nDEFAULT_LAYERS_OWNER = 'admin'\n\n# Where should newly created maps be focused?\nDEFAULT_MAP_CENTER = (112.3, -7.9)\n\n# How tightly zoomed should newly created maps be?\n# 0 = entire world;\n# maximum zoom is between 12 and 15 (for Google Maps, coverage varies by area)\nDEFAULT_MAP_ZOOM = 5\n\nMAP_BASELAYERSOURCES = {\n 'any': {'ptype': 'gxp_olsource'},\n 'google': {'ptype': 'gxp_googlesource',\n 'apiKey': GOOGLE_API_KEY},\n 'mapbox': {'ptype': 'gxp_mapboxsource'},\n 'mapquest': {'ptype': 'gxp_mapquestsource'},\n 'bing': {'ptype': 'gxp_bingsource'}\n }\n\nMAP_BASELAYERS = \\\n [{'source': 'any',\n 'type': 'OpenLayers.Layer',\n 'args': ['No background'],\n 'visibility': False,\n 'fixed': True,\n 'group': 'background'},\n {'source':'any',\n 'type': 'OpenLayers.Layer.OSM',\n 'args': ['OpenStreetMap'],\n 'visibility': True,\n 'fixed': True,\n 'group':'background'},\n {'source': 'bing',\n 'group': 'background',\n 'name': 'Aerial',\n 'visibility': False,\n 'fixed': True},\n {'source': 'mapbox',\n 'name': 'blue-marble-topo-bathy-jan',\n 'group': 'background'},\n {'source': 'mapquest',\n 'name': 'osm',\n 'group': 'background',\n 'visibility': False,\n 'fixed': True}\n]\n\n# NAVBAR expects a dict of dicts or a path to an ini file\nNAVBAR = \\\n {'maps': {'id': '%sLink',\n 'item_class': '',\n 'link_class': '',\n 'text': 'Maps',\n 'url': 'geonode.maps.views.maps'},\n 'data': {'id': '%sLink',\n 'item_class': '',\n 'link_class': '',\n 'text': 'Data',\n 'url': 'geonode.maps.views.browse_data'},\n # 'index': {'id': '%sLink',\n # 'item_class': '',\n # 'link_class': '',\n # 'text': 'Featured Map',\n # 'url': 'geonode.views.index'},\n 'master': {'id': '%sLink',\n 
'item_class': '',\n 'link_class': '',\n 'text': 'This page has no tab for this navigation'},\n 'meta': {'active_class': 'here',\n 'default_id': '%sLink',\n 'default_item_class': '',\n 'default_link_class': '',\n 'end_class': 'last',\n 'id': '%sLink',\n 'item_class': '',\n 'link_class': '',\n 'visible': 'data\\nmaps'}}\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.admin',\n 'django.contrib.staticfiles',\n 'django_extensions',\n 'registration',\n 'profiles',\n 'avatar',\n 'geonode.core',\n 'geonode.maps',\n 'geonode.proxy',\n 'impact',\n 'django_nose',\n# 'rosetta',\n)\n\nLOGGING = {\n 'version': 1,\n# 'disable_existing_loggers': True,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n },\n 'simple': {\n 'format': '%(levelname)s %(asctime)s %(message)s'\n },\n },\n 'handlers': {\n 'null': {\n 'level': 'DEBUG',\n 'class': 'django.utils.log.NullHandler',\n },\n 'console': {\n 'level': 'ERROR',\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple'\n },\n 'file': {\n 'level': 'DEBUG',\n 'class' : 'logging.handlers.RotatingFileHandler',\n 'formatter': 'verbose',\n 'filename': os.path.join(PROJECT_ROOT, 'risiko.log'),\n 'maxBytes': '1024000',\n 'backupCount': '3',\n },\n 'mail_admins': {\n 'level': 'ERROR',\n 'class': 'django.utils.log.AdminEmailHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['null'],\n 'propagate': False,\n 'level': 'ERROR',\n },\n 'gsconfig': {\n 'handlers': ['null', 'file'],\n 'propagate': False,\n 'level': 'WARNING',\n },\n 'owslib': {\n 'handlers': ['null'],\n 'propagate': False,\n 'level': 'ERROR',\n },\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': False,\n },\n 'geonode.maps': {\n 'handlers': ['file'],\n 'propagate': False,\n 'level': 'WARNING',\n },\n 'risiko': {\n 'handlers': ['file'],\n 'level': 'DEBUG',\n 'propagate': False,\n }\n }\n}\n\n\ndef get_user_url(u):\n \"\"\"Helper function for profile module\n \"\"\"\n\n from django.contrib.sites.models import Site\n s = Site.objects.get_current()\n return 'http://' + s.domain + '/profiles/' + u.username\n\n\nABSOLUTE_URL_OVERRIDES = {'auth.user': get_user_url}\n\nAUTH_PROFILE_MODULE = 'maps.Contact'\nREGISTRATION_OPEN = False\nDB_DATASTORE = False\n\n# Get rid of a future warning in elemtree:\nimport warnings\ntry:\n warnings.filterwarnings(action='ignore', category=FutureWarning)\nexcept NameError:\n del warnings\n\ntry:\n from local_settings import *\nexcept ImportError:\n pass\n" }, { "alpha_fraction": 0.45557302236557007, "alphanum_fraction": 0.4868612587451935, "avg_line_length": 37.31026840209961, "blob_id": "63acab8e7ad4ad88b74428bb1eebfd96ba77d0d9", "content_id": "170be08cb00a3e3f0a092a2b6235279c701b8b19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17163, "license_type": "no_license", "max_line_length": 79, "num_lines": 448, "path": "/impact/tests/test_interpolate.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "import unittest\nimport numpy\n\nfrom impact.engine.interpolation2d import interpolate2d, interpolate_raster\nfrom impact.tests.utilities import combine_coordinates\nfrom impact.storage.utilities import nanallclose\n\n\ndef linear_function(x, y):\n \"\"\"Auxiliary function for use with interpolation test\n \"\"\"\n\n return x + y / 2.0\n\n\nclass Test_interpolate(unittest.TestCase):\n\n def 
test_linear_interpolation_basic(self):\n \"\"\"Interpolation library works for linear function - basic test\n \"\"\"\n\n # Define pixel centers along each direction\n x = [1.0, 2.0, 4.0]\n y = [5.0, 9.0]\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n\n # Test first that original points are reproduced correctly\n for i, xi in enumerate(x):\n for j, eta in enumerate(y):\n val = interpolate2d(x, y, A, [(xi, eta)], mode='linear')[0]\n ref = linear_function(xi, eta)\n assert numpy.allclose(val, ref, rtol=1e-12, atol=1e-12)\n\n # Then test that genuinly interpolated points are correct\n xis = numpy.linspace(x[0], x[-1], 10)\n etas = numpy.linspace(y[0], y[-1], 10)\n points = combine_coordinates(xis, etas)\n\n vals = interpolate2d(x, y, A, points, mode='linear')\n refs = linear_function(points[:, 0], points[:, 1])\n assert numpy.allclose(vals, refs, rtol=1e-12, atol=1e-12)\n\n def test_constant_interpolation_basic(self):\n \"\"\"Interpolation library works for piecewise constant function\n \"\"\"\n\n # Define pixel centers along each direction\n x = numpy.array([1.0, 2.0, 4.0])\n y = numpy.array([5.0, 9.0])\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n\n # Then test that interpolated points are always assigned value of\n # closest neighbour\n xis = numpy.linspace(x[0], x[-1], 10)\n etas = numpy.linspace(y[0], y[-1], 10)\n points = combine_coordinates(xis, etas)\n\n vals = interpolate2d(x, y, A, points, mode='constant')\n\n # Find upper neighbours for each interpolation point\n xi = points[:, 0]\n eta = points[:, 1]\n idx = numpy.searchsorted(x, xi, side='left')\n idy = numpy.searchsorted(y, eta, side='left')\n\n # Get the four neighbours for each interpolation point\n x0 = x[idx - 1]\n x1 = x[idx]\n y0 = y[idy - 1]\n y1 = y[idy]\n\n z00 = A[idx - 1, idy - 1]\n z01 = A[idx - 1, idy]\n z10 = A[idx, idy - 1]\n z11 = A[idx, idy]\n\n # Location coefficients\n alpha = (xi - x0) / (x1 - x0)\n beta = (eta - y0) / (y1 - y0)\n\n refs = numpy.zeros(len(vals))\n for i in range(len(refs)):\n if alpha[i] < 0.5 and beta[i] < 0.5:\n refs[i] = z00[i]\n\n if alpha[i] >= 0.5 and beta[i] < 0.5:\n refs[i] = z10[i]\n\n if alpha[i] < 0.5 and beta[i] >= 0.5:\n refs[i] = z01[i]\n\n if alpha[i] >= 0.5 and beta[i] >= 0.5:\n refs[i] = z11[i]\n\n assert numpy.allclose(vals, refs, rtol=1e-12, atol=1e-12)\n\n def test_linear_interpolation_range(self):\n \"\"\"Interpolation library works for linear function - a range of cases\n \"\"\"\n\n for x in [[1.0, 2.0, 4.0], [-20, -19, 0], numpy.arange(200) + 1000]:\n for y in [[5.0, 9.0], [100, 200, 10000]]:\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n\n # Test that linearly interpolated points are correct\n xis = numpy.linspace(x[0], x[-1], 100)\n etas = numpy.linspace(y[0], y[-1], 100)\n points = combine_coordinates(xis, etas)\n\n vals = interpolate2d(x, y, A, points, mode='linear')\n refs = linear_function(points[:, 0], points[:, 1])\n assert numpy.allclose(vals, refs, rtol=1e-12, atol=1e-12)\n\n def 
test_linear_interpolation_nan_points(self):\n \"\"\"Interpolation library works with interpolation points being NaN\n\n This is was the reason for bug reported in:\n https://github.com/AIFDR/riab/issues/155\n \"\"\"\n\n # Define pixel centers along each direction\n x = [1.0, 2.0, 4.0]\n y = [5.0, 9.0]\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n\n # Then test that interpolated points can contain NaN\n xis = numpy.linspace(x[0], x[-1], 10)\n etas = numpy.linspace(y[0], y[-1], 10)\n xis[6:7] = numpy.nan\n etas[3] = numpy.nan\n points = combine_coordinates(xis, etas)\n\n vals = interpolate2d(x, y, A, points, mode='linear')\n refs = linear_function(points[:, 0], points[:, 1])\n assert nanallclose(vals, refs, rtol=1e-12, atol=1e-12)\n\n def test_linear_interpolation_nan_array(self):\n \"\"\"Interpolation library works (linear mode) with grid points being NaN\n \"\"\"\n\n # Define pixel centers along each direction\n x = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]\n y = [4.0, 5.0, 7.0, 9.0, 11.0, 13.0]\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n A[2, 3] = numpy.nan # (x=2.0, y=9.0): NaN\n\n # Then test that interpolated points can contain NaN\n xis = numpy.linspace(x[0], x[-1], 12)\n etas = numpy.linspace(y[0], y[-1], 10)\n points = combine_coordinates(xis, etas)\n\n vals = interpolate2d(x, y, A, points, mode='linear')\n refs = linear_function(points[:, 0], points[:, 1])\n\n # Set reference result with expected NaNs and compare\n for i, (xi, eta) in enumerate(points):\n if (1.0 < xi <= 3.0) and (7.0 < eta <= 11.0):\n refs[i] = numpy.nan\n\n assert nanallclose(vals, refs, rtol=1e-12, atol=1e-12)\n\n def test_interpolation_random_array_and_nan(self):\n \"\"\"Interpolation library (constant and linear) works with NaN\n \"\"\"\n\n # Define pixel centers along each direction\n x = numpy.arange(20) * 1.0\n y = numpy.arange(25) * 1.0\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define arbitrary values for each x, y pair\n numpy.random.seed(17)\n A = numpy.random.random((len(x), len(y))) * 10\n\n # Create islands of NaN\n A[5, 13] = numpy.nan\n A[6, 14] = A[6, 18] = numpy.nan\n A[7, 14:18] = numpy.nan\n A[8, 13:18] = numpy.nan\n A[9, 12:19] = numpy.nan\n A[10, 14:17] = numpy.nan\n A[11, 15] = numpy.nan\n\n A[15, 5:6] = numpy.nan\n\n # Creat interpolation points\n xis = numpy.linspace(x[0], x[-1], 39) # Hit all mid points\n etas = numpy.linspace(y[0], y[-1], 73) # Hit thirds\n points = combine_coordinates(xis, etas)\n\n for mode in ['linear', 'constant']:\n vals = interpolate2d(x, y, A, points, mode=mode)\n\n # Calculate reference result with expected NaNs and compare\n i = j = 0\n for k, (xi, eta) in enumerate(points):\n\n # Find indices of nearest higher value in x and y\n i = numpy.searchsorted(x, xi)\n j = numpy.searchsorted(y, eta)\n\n if i > 0 and j > 0:\n\n # Get four neigbours\n A00 = A[i - 1, j - 1]\n A01 = A[i - 1, j]\n A10 = A[i, j - 1]\n A11 = A[i, j]\n\n if numpy.allclose(xi, x[i]):\n alpha = 1.0\n else:\n alpha = 0.5\n\n if numpy.allclose(eta, y[j]):\n beta = 1.0\n else:\n beta = eta - y[j - 1]\n\n if mode == 'linear':\n if numpy.any(numpy.isnan([A00, A01, A10, 
A11])):\n ref = numpy.nan\n else:\n ref = (A00 * (1 - alpha) * (1 - beta) +\n A01 * (1 - alpha) * beta +\n A10 * alpha * (1 - beta) +\n A11 * alpha * beta)\n elif mode == 'constant':\n assert alpha >= 0.5 # Only case in this test\n\n if beta < 0.5:\n ref = A10\n else:\n ref = A11\n else:\n msg = 'Unknown mode: %s' % mode\n raise Exception(msg)\n\n #print i, j, xi, eta, alpha, beta, vals[k], ref\n assert nanallclose(vals[k], ref, rtol=1e-12, atol=1e-12)\n\n def test_linear_interpolation_outside_domain(self):\n \"\"\"Interpolation library sensibly handles values outside the domain\n \"\"\"\n\n # Define pixel centers along each direction\n x = [1.0, 2.0, 4.0]\n y = [5.0, 9.0]\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n\n # Simple example first for debugging\n xis = numpy.linspace(0.9, 4.0, 4)\n etas = numpy.linspace(5, 9.1, 3)\n points = combine_coordinates(xis, etas)\n refs = linear_function(points[:, 0], points[:, 1])\n\n vals = interpolate2d(x, y, A, points, mode='linear',\n bounds_error=False)\n msg = ('Length of interpolation points %i differs from length '\n 'of interpolated values %i' % (len(points), len(vals)))\n assert len(points) == len(vals), msg\n for i, (xi, eta) in enumerate(points):\n if xi < x[0] or xi > x[-1] or eta < y[0] or eta > y[-1]:\n assert numpy.isnan(vals[i])\n else:\n msg = ('Got %.15f for (%f, %f), expected %.15f'\n % (vals[i], xi, eta, refs[i]))\n assert numpy.allclose(vals[i], refs[i],\n rtol=1.0e-12, atol=1.0e-12), msg\n\n # Try a range of combinations of points outside domain\n # with error_bounds True\n for lox in [x[0], x[0] - 1]:\n for hix in [x[-1], x[-1] + 1]:\n for loy in [y[0], y[0] - 1]:\n for hiy in [y[-1], y[-1] + 1]:\n\n # Then test that points outside domain can be handled\n xis = numpy.linspace(lox, hix, 4)\n etas = numpy.linspace(loy, hiy, 4)\n points = combine_coordinates(xis, etas)\n\n if lox < x[0] or hix > x[-1] or \\\n loy < x[0] or hiy > y[-1]:\n try:\n vals = interpolate2d(x, y, A, points,\n mode='linear',\n bounds_error=True)\n except Exception, e:\n pass\n else:\n msg = 'Should have raise bounds error'\n raise Exception(msg)\n\n # Try a range of combinations of points outside domain with\n # error_bounds False\n for lox in [x[0], x[0] - 1, x[0] - 10]:\n for hix in [x[-1], x[-1] + 1, x[-1] + 5]:\n for loy in [y[0], y[0] - 1, y[0] - 10]:\n for hiy in [y[-1], y[-1] + 1, y[-1] + 10]:\n\n # Then test that points outside domain can be handled\n xis = numpy.linspace(lox, hix, 10)\n etas = numpy.linspace(loy, hiy, 10)\n points = combine_coordinates(xis, etas)\n refs = linear_function(points[:, 0], points[:, 1])\n vals = interpolate2d(x, y, A, points,\n mode='linear', bounds_error=False)\n\n assert len(points) == len(vals), msg\n for i, (xi, eta) in enumerate(points):\n if xi < x[0] or xi > x[-1] or\\\n eta < y[0] or eta > y[-1]:\n msg = 'Expected NaN for %f, %f' % (xi, eta)\n assert numpy.isnan(vals[i]), msg\n else:\n msg = ('Got %.15f for (%f, %f), expected '\n '%.15f' % (vals[i], xi, eta, refs[i]))\n assert numpy.allclose(vals[i], refs[i],\n rtol=1.0e-12,\n atol=1.0e-12), msg\n\n def test_interpolation_corner_cases(self):\n \"\"\"Interpolation library returns NaN for incomplete grid points\n \"\"\"\n\n # Define four pixel centers\n x = [2.0, 4.0]\n y = [5.0, 9.0]\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), 
len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n\n # Test that interpolated points are correct\n xis = numpy.linspace(x[0], x[-1], 3)\n etas = numpy.linspace(y[0], y[-1], 3)\n points = combine_coordinates(xis, etas)\n\n # Interpolate to cropped grids\n for xc, yc, Ac in [([x[0]], [y[0]], numpy.array([[A[0, 0]]])), # 1 x 1\n ([x[0]], y, numpy.array([A[0, :]])), # 1 x 2\n ]:\n\n vals = interpolate2d(xc, yc, Ac, points, mode='linear')\n msg = 'Expected NaN when grid %s is incomplete' % str(Ac.shape)\n assert numpy.all(numpy.isnan(vals)), msg\n\n def test_interpolation_raster_data(self):\n \"\"\"Interpolation library works for raster data\n\n This shows interpolation of data arranged with\n latitudes bottom - up and\n longitudes left - right\n \"\"\"\n\n # Create test data\n lon_ul = 100 # Longitude of upper left corner\n lat_ul = 10 # Latitude of upper left corner\n numlon = 8 # Number of longitudes\n numlat = 5 # Number of latitudes\n dlon = 1\n dlat = -1\n\n # Define array where latitudes are rows and longitude columns\n A = numpy.zeros((numlat, numlon))\n\n # Establish coordinates for lower left corner\n lat_ll = lat_ul - numlat\n lon_ll = lon_ul\n\n # Define pixel centers along each direction\n longitudes = numpy.linspace(lon_ll + 0.5,\n lon_ll + numlon - 0.5, numlon)\n latitudes = numpy.linspace(lat_ll + 0.5,\n lat_ll + numlat - 0.5, numlat)\n\n # Define raster with latitudes going bottom-up (south to north).\n # Longitudes go left-right (west to east)\n for i in range(numlat):\n for j in range(numlon):\n A[numlat - 1 - i, j] = linear_function(longitudes[j],\n latitudes[i])\n\n # Then test that interpolated points are correct\n xis = numpy.linspace(lon_ll + 1, lon_ll + numlon - 1, 100)\n etas = numpy.linspace(lat_ll + 1, lat_ll + numlat - 1, 100)\n points = combine_coordinates(xis, etas)\n\n vals = interpolate_raster(longitudes, latitudes, A, points,\n mode='linear')\n refs = linear_function(points[:, 0], points[:, 1])\n\n assert numpy.allclose(vals, refs, rtol=1e-12, atol=1e-12)\n\n\nif __name__ == '__main__':\n suite = unittest.makeSuite(Test_interpolate, 'test')\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite)\n" }, { "alpha_fraction": 0.5208545327186584, "alphanum_fraction": 0.5361139178276062, "avg_line_length": 32.89655303955078, "blob_id": "ba83e616219f9161a03f73467082e51119b6fe8b", "content_id": "c9e12e07a97c2e563620beeeec0a5d9e4f0b2fc7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1966, "license_type": "no_license", "max_line_length": 70, "num_lines": 58, "path": "/impact/tests/plugins/unspecific_building_impact_model.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "from django.template.loader import render_to_string\nfrom impact.plugins.core import FunctionProvider\nfrom impact.storage.vector import Vector\n\n\nclass EarthquakeBuildingDamageFunction(FunctionProvider):\n \"\"\"Risk plugin for earthquake damage to buildings\n\n :param requires category=='hazard' and \\\n subcategory.startswith('earthquake') and \\\n layer_type=='raster'\n :param requires category=='exposure' and \\\n subcategory.startswith('building')\n \"\"\"\n\n @staticmethod\n def run(layers):\n \"\"\"Risk plugin for earthquake school damage\n \"\"\"\n\n # Extract data\n # FIXME (Ole): This will be replaced by a helper function\n # to separate hazard from exposure using keywords\n H = layers[0] # 
Ground shaking\n E = layers[1] # Building locations\n\n # Interpolate hazard level to building locations\n H = H.interpolate(E)\n\n # Extract relevant numerical data\n coordinates = E.get_geometry()\n shaking = H.get_data()\n\n # Calculate building damage\n building_damage = []\n for i in range(len(shaking)):\n x = float(shaking[i].values()[0])\n if x < 6.0:\n value = 0.0\n else:\n value = (0.692 * (x ** 4) -\n 15.82 * (x ** 3) +\n 135.0 * (x ** 2) -\n 509.0 * x +\n 714.4)\n\n building_damage.append({'DAMAGE': value, 'MMI': x})\n\n # FIXME (Ole): Need helper to generate new layer using\n # correct spatial reference\n # (i.e. sensibly wrap the following lines)\n projection = E.get_projection()\n\n V = Vector(data=building_damage,\n projection=E.get_projection(),\n geometry=coordinates,\n name='Estimated pct damage')\n return V\n" }, { "alpha_fraction": 0.4264705777168274, "alphanum_fraction": 0.4264705777168274, "avg_line_length": 10.333333015441895, "blob_id": "256a9b18e4af8c938da61e2505a821c245b508bd", "content_id": "5000cc8bdbe3cec98a6b8aa76ab9efb26116bbbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 68, "license_type": "no_license", "max_line_length": 17, "num_lines": 6, "path": "/docs/development/release-process.rst", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "===============\nRelease Process\n===============\n\n\nNo documentation.\n" }, { "alpha_fraction": 0.5840414762496948, "alphanum_fraction": 0.5909943580627441, "avg_line_length": 31.476701736450195, "blob_id": "c76278bf5c0a99fde084881467e3400671c712c7", "content_id": "0d7499f69c81f364e54fe19a3942f51d0c07b820", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9061, "license_type": "no_license", "max_line_length": 79, "num_lines": 279, "path": "/impact/plugins/core.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "\"\"\"Function to manage self-registering plugins\n\nThe design is based on http://effbot.org/zone/metaclass-plugins.htm\n\nTo register the plugin, the module must be imported by the Python process\nusing it.\n\"\"\"\n\nfrom django.template.loader import render_to_string\nfrom impact.plugins.utilities import ColorMapEntry\nimport types\nimport keyword\n\nimport logging\nlogger = logging.getLogger('risiko')\n\n\nclass PluginMount(type):\n def __init__(cls, name, bases, attrs):\n if not hasattr(cls, 'plugins'):\n # This branch only executes when processing the mount point itself.\n # So, since this is a new plugin type, not an implementation, this\n # class shouldn't be registered as a plugin. 
Instead, it sets up a\n # list where plugins can be registered later.\n cls.plugins = []\n else:\n # This must be a plugin implementation, which should be registered.\n # Simply appending it to the list is all that's needed to keep\n # track of it later.\n cls.plugins.append(cls)\n\n\nclass FunctionProvider:\n \"\"\"Mount point for plugins which refer to actions that can be performed.\n\n Plugins implementing this reference should provide the following method:\n\n run(layers)\n\n =============== =========================\n layers A list of layers\n result A list of layers\n =============== =========================\n \"\"\"\n __metaclass__ = PluginMount\n\n target_field = 'DAMAGE'\n symbol_field = 'USE_MAJOR'\n\n def generate_style(self, data):\n \"\"\"Make a default style for all plugins\n \"\"\"\n\n # The parameters are substituted into the sld according the the\n # Django template methodology:\n # https://docs.djangoproject.com/en/dev/ref/templates/\n # builtins/?from=olddocs\n\n params = {'name': data.get_name()}\n\n if data.is_raster:\n colormapentries = [\n ColorMapEntry(color='#ffffff', opacity='0',\n quantity='-9999.0'),\n ColorMapEntry(color='#38A800', opacity='0',\n quantity='0.1'),\n ColorMapEntry(color='#38A800', quantity='0.2'),\n ColorMapEntry(color='#79C900', quantity='0.5'),\n ColorMapEntry(color='#CEED00', quantity='1'),\n ColorMapEntry(color='#FFCC00', quantity='2'),\n ColorMapEntry(color='#FF6600', quantity='3'),\n ColorMapEntry(color='#FF0000', quantity='5'),\n ColorMapEntry(color='#7A0000', quantity='9')]\n\n params['colormapentries'] = colormapentries\n return render_to_string('impact/styles/raster.sld', params)\n elif data.is_vector:\n params['damage_field'] = self.target_field\n return render_to_string('impact/styles/vector.sld', params)\n\n\ndef get_plugins(name=None):\n \"\"\"Retrieve a list of plugins that match the name you pass\n\n Or all of them if no name is passed.\n \"\"\"\n\n plugins_dict = dict([(pretty_function_name(p), p)\n for p in FunctionProvider.plugins])\n\n if name is None:\n return plugins_dict\n\n if isinstance(name, basestring):\n # Add the names\n plugins_dict.update(dict([(p.__name__, p)\n for p in FunctionProvider.plugins]))\n\n msg = ('No plugin named \"%s\" was found. 
'\n 'List of available plugins is: %s'\n % (name, ', '.join(plugins_dict.keys())))\n\n assert name in plugins_dict, msg\n return [{name: plugins_dict[name]}]\n else:\n msg = ('get_plugins expects either no parameters or a string '\n 'with the name of the plugin, you passed: '\n '%s which is a %s' % (name, type(name)))\n raise Exception(msg)\n\n\ndef get_plugin(name):\n \"\"\"Get plugin that matches given name\n\n This is just a wrapper around get_plugins to simplify\n \"\"\"\n\n plugin_list = get_plugins(name)\n _, impact_function = plugin_list[0].items()[0]\n\n return impact_function\n\n\ndef pretty_function_name(func):\n \"\"\"Return a human readable name for the function\n if the function has a func.plugin_name use this\n otherwise turn underscores to spaces and Caps to spaces \"\"\"\n\n if not hasattr(func, 'plugin_name'):\n nounderscore_name = func.__name__.replace('_', ' ')\n func_name = ''\n for i, c in enumerate(nounderscore_name):\n if c.isupper() and i > 0:\n func_name += ' ' + c\n else:\n func_name += c\n else:\n func_name = func.plugin_name\n return func_name\n\n\ndef requirements_collect(func):\n \"\"\"Collect the requirements from the plugin function doc\n\n The requirements need to be specified using\n :param requires <valid pythhon expression>\n The layer keywords are put into the local name space\n each requires should be on a new line\n a '/' at the end of a line will be a continuation\n\n returns the strings for the python exec\n\n Example of valid requires\n :param requires category==\"impact\" and subcategory.startswith(\"population\"\n \"\"\"\n requireslines = None\n if hasattr(func, '__doc__') and func.__doc__:\n docstr = func.__doc__\n\n require_cmd = ':param requires'\n\n lines = docstr.split('\\n')\n requires_lines = []\n\n join_line = False\n\n for cnt, line in enumerate(lines):\n doc_line = line.strip()\n if len(doc_line) == 0:\n continue\n\n if join_line and not doc_line.startswith(require_cmd):\n requires_lines[-1] = requires_lines[-1][:-1] + doc_line\n\n elif doc_line.startswith(require_cmd):\n requires_lines.append(doc_line[len(require_cmd) + 1:])\n\n join_line = doc_line[-1] == '/'\n\n return requires_lines\n\n\ndef requirement_check(params, require_str, verbose=False):\n \"\"\"Checks a dictionary params against the requirements defined\n in require_str. Require_str must be a valid python expression\n and evaluate to True or False\"\"\"\n\n # Some keyword should never go into the requirement check\n # FIXME (Ole): This is not the most robust way. If we get a\n # more general way of doing metadata we can treat caption and\n # many other things separately. 
See issue #148\n excluded_keywords = ['caption']\n\n execstr = 'def check():\\n'\n for key in params.keys():\n if key == '':\n if params[''] != '':\n # This should never happen\n msg = ('Empty key found in requirements with '\n 'non-empty value: %s' % params[''])\n raise Exception(msg)\n else:\n continue\n\n # Check that symbol is not a Python keyword\n if key in keyword.kwlist:\n msg = ('Error in plugin requirements'\n 'Must not use Python keywords as params: %s' % (key))\n logger.error(msg)\n return False\n\n if key in excluded_keywords:\n continue\n\n if isinstance(params[key], basestring):\n execstr += ' %s = \"%s\" \\n' % (key.strip(), params[key])\n else:\n execstr += ' %s = %s \\n' % (key.strip(), params[key])\n\n execstr += ' return ' + require_str\n\n if verbose:\n print execstr\n try:\n exec(compile(execstr, '<string>', 'exec'))\n return check()\n except NameError, e:\n # This condition will happen frequently since the function\n # is evaled against many params that are not relevant and\n # hence correctly return False\n pass\n except Exception, e:\n msg = ('Requirements header could not compiled: %s. '\n 'Original message: %s' % (execstr, e))\n logger.error(msg)\n\n return False\n\n\ndef requirements_met(requirements, params, verbose=False):\n \"\"\"Checks the plugin can run with a given layer.\n\n Based on the requirements specified in the doc string.\n\n Returns:\n True: if there are no requirements or they are all met.\n False: if it has requirements and none of them are met.\n \"\"\"\n if len(requirements) == 0:\n # If the function has no requirements, then they are all met.\n return True\n\n for requires in requirements:\n if requirement_check(params, requires):\n return True\n\n # If none of the conditions above is met, return False.\n return False\n\n\ndef compatible_layers(func, layer_descriptors):\n \"\"\"Fetches all the layers that match the plugin requirements.\n\n Input\n func: ? (FIXME(Ole): Ted, can you fill in here?\n layer_descriptor: Layer names and meta data (keywords, type, etc)\n\n Output:\n Array of compatible layers, can be an empty list.\n \"\"\"\n\n layers = []\n requirements = requirements_collect(func)\n\n for layer_name, layer_params in layer_descriptors:\n if requirements_met(requirements, layer_params):\n layers.append(layer_name)\n\n return layers\n" }, { "alpha_fraction": 0.7311861515045166, "alphanum_fraction": 0.7422128319740295, "avg_line_length": 31.22891616821289, "blob_id": "d1831c5218b91916fba280c41d5e0f15b36358d8", "content_id": "feb326b7d410c6d4d55fc1d00f73afefef65e5c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 16052, "license_type": "no_license", "max_line_length": 124, "num_lines": 498, "path": "/docs/development/architecture.rst", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "\nSoftware Design Specification\n=============================\n\nThis document is a software design specification of the Risiko solution. \nIt is intended as a reference document for\nthe software architecture to inform and guide developers about the\narchitecture, standards, coding conventions, use cases and design constrains.\n\nGood risk analyses rely on modelling using spatial information ranging from\ngeophysical data to population information and administrative jurisdictions.\nThe purpose tool is to support the implementation of risk assessment\nguidelines by:\n\n1. Making them easy to use\n2. Allowing results to be reproduced\n3. 
Ensuring consistency across different reporting authorities\n\nIn support of these aims risk in the box will:\n\n1. Be able to be run across a variety of common platforms\n2. Run both locally without Internet access and remotely\n3. Use commonly available standards and technologies\n4. Support interfaces to a variety of external systems (e.g. GeoNode)\n5. Support flexible development of impact models using plugins\n6. Be internationalized\n\nBasic system requirements to use Risk in a Box are GeoNode, Apache, Python,\nDjango, Open Layers. This includes having a Java Runtime Environment\ninstalled.\n\n\nSystem Overview\n---------------\n\nRiab in essence provides a way that scientific data in the form of spatial\nhazard estimates from tsunami, earthquake, volcanic ash, storms etc. can be\ncombined with exposure data such as population or infrastructure to provide\nend users such as disaster managers with a geographic risk/impact estimation.\nThis will ultimately allow the construction of an actionable risk management\nplan.\n\nTo this end the system must be able to take a variety of geographic data\nlayers (in either vector or raster) from a GeoNode and intelligently work out\nwhat impact function will be appropriate for a given hazard and exposure.\nThese impact functions will be written as plugins and will allow experts to\nconstruct new techniques for an impact calculation of the form\n\nImpact = Risk_Plugin(Hazard, Exposure)\n\nThe hazard and exposure types will be determined from the meta data stored\nwith the GeoNode layer and this is used to choose the correct plugin.\n\nFor flexibility and maintainability the software is split into two major\nsubsystems which will communicate using XML-RPC.\n\n1. Riab-Engine: The central server that will calculate the impact\n function using plugins and data fetched from the GeoServers. It is\n expected to run standalone and have a dependency in Celery. With code\n based on the djcelery package (http://pypi.python.org/pypi/django-celery)\n2. Riab-Client: The web based front-end allowing both a simplified end-\n user front end and a more advanced administration mode.\n\n.. figure:: https://docs.google.com/drawings/pubid=1DG2RT3wREAd0fC0mGUqgbFR3YwNDY9QWHZ4Kb7p_uRU&w=960&h=720\n\nFigure 1: High Level Riab components\n\n\nDesign Considerations\n---------------------\n\nThis section describes the design issues and considerations that are being\naddressed during the full design process.\n\n\nAssumptions and Dependencies\n----------------------------\n\nPrimary dependencies exist with the GeoServer REST interface and the Django\nVersion.\n\nDjango was chosen as the web framework as it is synergic with other relevant\nproject and has an active development community.\n\nPython 2.7x is being used to develop this project (both for Django and\nServer) as it provides good flexibility for this type of system design.\nVersion 3.x of python has been released, and will eventually supersede the\n2.x series however support for 3.x in third party libraries is currently\nstill low so the risk of software issues in using python versions >2 judged\nto be higher as of Jan 2011.\n\nThe Riab solution will be implemented in phases. See Riab Projec Plan for\ndetails.\n\nThe verson 1.0 assumptions are included below:\n\n1. Riab will need to be able to run on a local disconnected PC via a USB\n interface.\n2. Centralized server installation must also be supported\n3. 
Windows and Linux (developed using Ubuntu >= 10.4) will need to be supported\n\n**End-user characteristics : Risk Managers**\n\n1. Risk Managers will not be expert in hazard modelling\n2. Will use the system through a web browser\n3. Interface must be simple and support full language\n internationalization.\n4. Input should allow local users to upload geo-data from spreadsheets\n about local conditions.\n5. Output should be clear and understandable.\n6. An expert advanced user mode should be supported for more experienced\n users.\n\n**End-user characteristics: Advanced Modellers**\n\n1. Must be able to upload maps layers and set layer metadata\n2. Should be able to use the plugin API to define new risk/impact\n functions\n\n**End-user characteristics: Administrators**\n\n1. Should be able to setup users permissions\n2. Review an audit of activities\n3. Update local documentation\n\n**Possible and/or probable changes in functionality**\n\n1. Support for more complex impact models\n2. Output should lead to a full risk management plan\n3. Increase support for probabilistic modeling (on a hazard by hazard\n basis)\n4. Interface with other Risk based web frameworks and with science based\n hazard estimation tools.\n\n\nGeneral Constraints\n-------------------\n\nDescribe any global limitations or constraints that have a significant impact\non the design of the system's software (and describe the associated impact).\nSuch constraints may be imposed by any of the following:\n\n* Hardware or software environment\n\n* Limitation of no network cases or low speced machines\n\n* End-user environment\n\n* Standards compliance\n\n + Should conform with international standards including WMS `http://www.opengeospatial.org/standards/wms)`_\n\n* Interoperability requirements\n\n + OGC compliant protocols (as above)\n\n* Interface/protocol requirements\n\n + Must be able to be completely distributed (i.e. remote geoservers) or completely local (everything running on one PC)\n\n* Data repository and distribution requirements\n\n* Security requirements (or other such regulations)\n\n + The system should not hold user sensitive data\n\n + Consideration should be given to OpenID as a standard for authentication.\n\n* Memory and other capacity limitations\n\n + Restrictions may exist for the system when installed on a USB Stick\n\n* Performance requirements\n\n + Peak transaction volume even when centralizated will be relatively low (less than 1 request per second)\n\n* Verification and validation requirements (testing)\n\n + All builds should have a full test suite used\n\n\nGoals and Guidelines\n--------------------\n\nPrinciples which embody the design of software include:\n\n1. Modularity and functional separation. Ensuring that API level\n separation (via web services) is maintained between the functional\n components (Server, Web frontend and GeoServer)\n2. Emphasis on maintainability and robustness versus speed. Since this\n will be an open source project it is desired to make the code simple and\n well documented.\n3. Ability to play well with other relevant frameworks. The Riab system\n will need to integrate with other Risk based web frameworks and with\n science based hazard estimation tools e.g. OpenQuake, BNPB DIPI,\n Bakosurtanal SIGN project etc.\n\n\n\n\n\nArchitectural Strategies\n------------------------\n\nThe Riab_app is designed to be stateless. This provides both greater\nflexibility and robustness as it allows for easier scaling and for more\ncomprehensive testing. 
The impact of this is a slight performance hit since\nreconnections (and re: authentication) to GeoServers need to be done for each\ntransaction.\n\nAll user settings and user interface will be managed through the Django\nframework application. The GeoServer rendering will be done using OpenLayers\n(http://openlayers.org/) and other associated javascript GeoExt, GXP. Where\npractical functions will be exposed as Ajax calls.\n\nThe web interface is yet to be documented.\n\n\nSystem Architecture\n-------------------\n\nThis section provides a high-level overview of how the functionality and\nresponsibilities are partitioned and then assigned to subsystems and\ncomponents. The various architectural components of Riab and the protocols\nused are described below \n\nRiab Core (riab_core): This module is responsible for calculating the impact\nfunction. It uses file like objects (e.g. geotiff and gml) and associated\nmetadata to determine which risk plug-in to call. It then calls this plugin\nand writes the resulting layer to file and returns the fully qualified\npathname. Riab Core makes the following assumptions:\n\n1. Input layer files are either geotiff (for raster data) or gml (for\n vector data)\n2. All layers are in WGS84 geographic coordinates\n3. Layers are named (either as dictionaries or using the internal naming\n structure of geotiff and gml)\n\nRisk Plugins: These are plugins written in python that allow customized\nimpact functions to be run depending on the type of hazard and the exposure.\nThere may be none, one or many plugins that will satisfy a particular\ncombination of hazard and exposure. Each plugin makes the following\nassumptions\n\n1. Input data are dictionaries of numerical (numpy) arrays where keys\n are the original layer names.\n2. Data points have been aligned so vector operations are allowed.\n3. It is up to the plugin to know the semantics of names and attributes,\n i.e. if there is a layer named WALL_TYPE with attributes like Fibro,\n Timber, Brick veneer etc, the plugin must be aware of the meaning of\n these names and used them correctly.\n\nPyPlugin: A flexible python library to manage the plugins, find the\nappropriate plugin for a given criteria and execute this.\n\nRiab Server (riab_server): This is the central stateless server that exposes\nthe API for riab_core via XML-RPC.\n\nRiab Web Server (riab_django): The web based front-end allowing both a\nsimplified, advanced and administration user types. Riab-django is\nresponsible for retrieving and storing layers on one or more GeoNode and for\npassing the associated files on to riab_server for computation. The web\nclient can query the Riab-Server to find out what plugins are available and\nrequest an impact calculation based on one or more layers hazard and one or\nmore exposure layers. The administration of users and other local settings\nare managed by Django. In particular it will\n\n1. Allow the user to select layers for hazard levels and exposure data\n2. Get layers from GeoNodes by bounding box and in WGS84 geographical\n coordinates irrespective of the native projection or datum and provide\n them to riab_server as geotiff (for rasters) or gml (for vector data).\n3. Put resulting layers back to a GeoNode and provide a view of them\n4. Provide legends for all layers\n5. ..\n\nRiab Web Interface: Rendered using Django Templates and OpenLayers . 
The\ninterface talks to both the Riap-Django and the relavent GeoServers.\n\n\nComponent Communications\n------------------------\n\nThe flow of information between subsystems is shown below (). Note that this\ndiagram includes a full test case including the initial upload of data into\nGeoserver. This will not be required for risk managers. The bold items show\nsteps that are either input or output for the user.\n\n\n\n\nFigure 3: Riab Component Communications Flow\n\n\n\n\nDetailed System Design\n----------------------\n\nThis section contain a detailed designs of the Riab system components.\n\n\nRIAB HTTP API\n-------------\n\nThe API documentation::\n\nAll API calls start with http://myriab.com/api/v1\n\n\nVersion\n:::::::\n\n\nAll API calls begin with API version. For this documentation, we will assume\nevery request begins with the above path.\n\nPath\n::::\n\nFor this documentation, we will assume every request begins with the above\npath.\n\nUnits\n:::::\n\nAll coordinates are in WGS-84 (EPSG:4326) unless otherwise specified and all\nunits of measurement are in the International System of Units (SI).\n\nFormat\n::::::\n\nAll calls are returned in JSON.\n\nStatus Codes\n::::::::::::\n\n1. 200 Successful GET and PUT.\n2. 201 Successful POST.\n3. 202 Successful calculation queued.\n4. 204 Successful DELETE\n5. 401 Unauthenticated.\n6. 409 Unsuccessful POST, PUT, or DELETE (Will return an errors object).\n\nEndpoints\n:::::::::\n\n1. POST`/calculation`_\n2. GET`/calculation/:id`_\n3. GET`/calculation/:id/status`_\n4. GET`/functions`_\n5. GET`/functions/:id`_\n\n\nPOST /calculation\n.................\n\nCalculate the Impact as a function of Hazards and Exposures. Required fields\nare\n\n\n1. impact_function: URI of the impact function to be run\n2. hazards: A dictionary of named hazard levels .. {h1: H1, h2: H2,\n hn: HN] each H is either a GeoNode layer uri or a geoserver layer\n path where each layer follows the format\n username:userpass@geoserver_url:layer_name\n3. exposure: An array of exposure levels ..[E1,E2...EN] each E is either\na download url a geoserver layer path\n4. impact_level: The output impact level. Possible responses include 202 or 409\n\nexample request::\n\n curl -u alice:cooper http://myriab.com/api/v1/calculation \\\n -F \"impact_function=/functions/1\" \\\n -F \"hazards=/data/geonode:hazard1\" \\\n -F \"exposure=user:pass@geoserver_url:exposure_1\" \\\n -F \"keywords=some,keywords,added,to,the,created,map\"\n\n\nresponse::\n\n 202 Accepted\n {\n \"uri\": \"/riab/calculation/9\",\n \"transition_uri\": \"/riab/calculation/9/status\",\n \"warnings\": [ \"Projection unknown, layer geoserver_url:exposure_1 does not\n have projection information\" ]\n }\n\nanother possible response::\n\n 409 Conflict\n [\n \"Invalid Impact function: Impact function does not support the hazard and/or exposure type\",\n ]\n\n\nGET /calculation/:id\n....................\n\nReturns the details of a given calculation. 
Api will respond with status 200\nif calculation has been completed and 404 if it is still in progress.\n\nexample request::\n\n $ curl -u alice:cooper http://myriab.com/api/v1/calculation/9\n\nresponse::\n\n [\n {\n \"uri\": \"/riab/calculation/9\",\n \"result_uri\": \"/data/layer/54\",\n \"calculation_map_uri\": \"/data/maps/23\",\n \"info\": [\"Retrieving data for layer x\", \"Calculating impact\", \"Warning:\n Had to cast doubles to single precision\", \"Calculation finished\n successfully\", \"Uploading impact data\", \"Creating map in geonode with hazard,\n exposure and impact layers\"]\n }\n ]\n\n\nGET /calculation/:id/status\n...........................\n\nGets the status of the calculation. It will usually respond with 200.\n\nexample request::\n\n $ curl -u alice:cooper http://myriab.com/api/calculation/9/status\n\nresponse::\n\n [\n {\n \"success\": \"true\",\n \"message\": \"The calculation has been performed successfully\"\n }\n ]\n\nanother possible response::\n\n [\n {\n \"success\": \"false\",\n \"message\": \"An error has occurred during processing: (if you have admin rights a full stack trace can be found below)\"\n }\n ]\n\nGET /functions\n..............\n\nReturns a collection of impact functions, if no hazard or exposure levels are\nprovided it returns all the available ones.. Response will be 200\n\nexample request::\n\n $ curl -u alice:cooper http://myriab.com/api/v1/functions \\\n -F \"hazards=/data/geonode:HazardZ\" \\\n -F \"exposure=/data/geonode:ExposureX\"\n\nresponse::\n\n [\n {\n \"uri\": \"/functions/1\",\n \"name\": \"Super duper impact function\",\n \"author\": \"Alice cooper\",\n \"description\": \"It does what you expect it to ....\"\n },\n {\n \"uri\": \"/functions/2\",\n \"name\": \"Another nice impact function\",\n \"author\": \"Alice Cooper\",\n \"description\": \"You can't imagine ...\"\n },\n ...\n ]\n\n\nGET /function/:id\n.................\n\nReturns the details of the given impact function. 
Possible responses include\n200 or 404\n\nexample request::\n\n $ curl -u alice:cooper http://myriab.com/api/v1/function/1\n\nresponse::\n\n [\n {\n \"uri\": \"/functions/1\",\n \"name\": \"Another nice impact function\",\n \"author\": \"Alice Cooper\",\n \"description\": \"You can't imagine ...\"\n }\n ]\n\n" }, { "alpha_fraction": 0.5642856955528259, "alphanum_fraction": 0.6142857074737549, "avg_line_length": 16.375, "blob_id": "c9e52190640469fb505270fa807f67565d35386a", "content_id": "4169f4868e35480c5567c2d880c6ba614f7f770e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 140, "license_type": "no_license", "max_line_length": 34, "num_lines": 8, "path": "/__version__.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "\"\"\"Version information for Risiko.\n\"\"\"\n\n# Author: Ole Nielsen <[email protected]>\n# Copyright (c) 2011 AIFDR, GFDRR\n# License: GPL\n\n__version__ = '0.5.1'\n\n" }, { "alpha_fraction": 0.5104877948760986, "alphanum_fraction": 0.5370891094207764, "avg_line_length": 37.404319763183594, "blob_id": "3e185938317eeeec3d7193a8aad293b8541c536b", "content_id": "1f87c7fbf92c4d04863b065030d3ef1121bac626", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12443, "license_type": "no_license", "max_line_length": 195, "num_lines": 324, "path": "/impact/plugins/earthquake/BNPB_earthquake_guidelines.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "\"\"\"Impact function based on Padang 2009 post earthquake survey\n\nThis impact function estimates percentual damage to buildings as a\nfunction of ground shaking measured in MMI.\nBuildings are currently assumed to be represented in OpenStreetMap with\nattributes collected as during the July 2011 Indonesian mapping competition.\n\nThis impact function maps the OSM buildings into 2 classes:\nUnreinforced masonry (URM) and reinforced masonry (RM) according to\nthe guidelines.\n\"\"\"\n\nfrom django.template.loader import render_to_string\nfrom impact.plugins.core import FunctionProvider\nfrom impact.storage.vector import Vector\nfrom django.utils.translation import ugettext as _\nfrom impact.plugins.utilities import PointZoomSize\nfrom impact.plugins.utilities import PointClassColor\nfrom impact.plugins.utilities import PointSymbol\nfrom impact.plugins.mappings import osm2bnpb, unspecific2bnpb\n\n# Damage 'curves' for the two vulnerability classes\ndamage_parameters = {'URM': [6, 7],\n 'RM': [6, 8]}\n\n\nclass EarthquakeGuidelinesFunction(FunctionProvider):\n \"\"\"Risk plugin for BNPB guidelines for earthquake damage to buildings\n\n :param requires category=='hazard' and \\\n subcategory.startswith('earthquake') and \\\n layer_type=='raster'\n :param requires category=='exposure' and \\\n subcategory.startswith('building') and \\\n layer_type=='vector'\n \"\"\"\n\n # FIXME (Ole): Something like this too\n # and \\\n # datatype=='osm'\n\n vclass_tag = 'VCLASS'\n target_field = 'DMGLEVEL'\n\n def run(self, layers):\n \"\"\"Risk plugin for earthquake school damage\n \"\"\"\n\n # Extract data\n H = layers[0] # Ground shaking\n E = layers[1] # Building locations\n\n # Map from OSM attributes to the guideline classes (URM and RM)\n # FIXME (Ole): Not very robust way of deciding\n # Need keyword identifier for each kind of building dataset.\n if E.get_name().lower().startswith('osm'):\n # Map from OSM attributes to the padang building classes\n E = osm2bnpb(E, target_attribute=self.vclass_tag)\n 
else:\n E = unspecific2bnpb(E, target_attribute=self.vclass_tag)\n\n # Interpolate hazard level to building locations\n H = H.interpolate(E)\n\n # Extract relevant numerical data\n coordinates = E.get_geometry()\n shaking = H.get_data()\n N = len(shaking)\n\n # List attributes to carry forward to result layer\n attributes = E.get_attribute_names()\n\n # Calculate building damage\n count3 = 0\n count2 = 0\n count1 = 0\n building_damage = []\n for i in range(N):\n mmi = float(shaking[i].values()[0])\n\n building_class = E.get_data(self.vclass_tag, i)\n lo, hi = damage_parameters[building_class]\n\n if mmi < lo:\n damage = 1 # Low\n count1 += 1\n elif lo <= mmi < hi:\n damage = 2 # Medium\n count2 += 1\n else:\n damage = 3 # High\n count3 += 1\n\n # Collect shake level and calculated damage\n result_dict = {self.target_field: damage,\n 'MMI': mmi}\n\n # Carry all orginal attributes forward\n for key in attributes:\n result_dict[key] = E.get_data(key, i)\n\n # Record result for this feature\n building_damage.append(result_dict)\n\n # Create report\n caption = ('<table border=\"0\" width=\"320px\">'\n ' <tr><th><b>%s</b></th><th><b>%s</b></th></th>'\n ' <tr></tr>'\n ' <tr><td>%s&#58;</td><td>%i</td></tr>'\n ' <tr><td>%s (10-25%%)&#58;</td><td>%i</td></tr>'\n ' <tr><td>%s (25-50%%)&#58;</td><td>%i</td></tr>'\n ' <tr><td>%s (50-100%%)&#58;</td><td>%i</td></tr>'\n '</table>' % (_('Buildings'), _('Total'),\n _('All'), N,\n _('Low damage'), count1,\n _('Medium damage'), count2,\n _('High damage'), count3))\n\n # Create vector layer and return\n V = Vector(data=building_damage,\n projection=E.get_projection(),\n geometry=coordinates,\n name='Estimated damage level',\n keywords={'caption': caption})\n\n return V\n\n def generate_style(self, data):\n \"\"\"Generates and SLD file based on the data values\n \"\"\"\n\n if data.is_point_data:\n return self.generate_point_style(data)\n elif data.is_polygon_data:\n return self.generate_polygon_style(data)\n else:\n msg = 'Unknown style %s' % str(data)\n raise Exception(msg)\n\n def generate_point_style(self, data):\n \"\"\"Generates and SLD file based on the data values\n \"\"\"\n\n # Define default behaviour to be used when\n # - symbol attribute is missing\n # - attribute value is None or ''\n DEFAULT_SYMBOL = 'circle'\n\n symbol_field = None\n\n # FIXME: Replace these by dict and extend below\n symbol_keys = [None, '']\n symbol_values = [DEFAULT_SYMBOL, DEFAULT_SYMBOL]\n\n # Predefined scales and corresponding font sizes\n scale_keys = [10000000000, 10000000, 5000000,\n 1000000, 500000, 250000, 100000]\n scale_values = [3, 5, 8, 12, 14, 16, 18]\n\n # Predefined colour classes\n class_keys = ['No Damage', '10-25', '25-50', '50-100']\n class_values = [{'min': -0.5, 'max': 0.5,\n 'color': '#cccccc', 'opacity': '1'},\n {'min': 0.5, 'max': 1.5,\n 'color': '#fecc5c', 'opacity': '1'},\n {'min': 1.5, 'max': 2.5,\n 'color': '#fd8d3c', 'opacity': '1'},\n {'min': 2.5, 'max': 3.5,\n 'color': '#f31a1c', 'opacity': '1'}]\n\n # Definition of symbols for each attribute value\n if self.symbol_field in data.get_attribute_names():\n\n # Get actual symbol field to use\n symbol_field = self.symbol_field\n\n symbols = {'Church/Mosque': 'ttf://ESRI US MUTCD 3#0x00F6',\n 'Commercial (office)': 'ttf://ESRI Business#0x0040',\n 'Hotel': 'ttf://ESRI Public1#0x00b6',\n 'Medical facility': 'ttf://ESRI Cartography#0x00D1',\n 'Other': 'ttf://ESRI Business#0x002D',\n 'Other industrial': 'ttf://ESRI Business#0x0043',\n 'Residential': 'ttf://ESRI Cartography#0x00d7',\n 'Retail': 
'ttf://Comic Sans MS#0x0024',\n 'School': 'ttf://ESRI Cartography#0x00e5',\n 'Unknown': 'ttf://Comic Sans MS#0x003F',\n 'Warehouse': 'ttf://ESRI US MUTCD 3#0x00B5'}\n else:\n symbols = {None: DEFAULT_SYMBOL, '': DEFAULT_SYMBOL}\n\n # Generate sld style file\n params = dict(name=data.get_name(),\n damage_field=self.target_field,\n symbol_field=symbol_field,\n symbols=symbols,\n scales=dict(zip(scale_keys, scale_values)),\n classifications=dict(zip(class_keys, class_values)))\n\n # The styles are in $RIAB_HOME/riab/impact/templates/impact/styles\n return render_to_string('impact/styles/point_classes.sld', params)\n\n def generate_polygon_style(self, data):\n \"\"\"Generates a polygon SLD file based on the data values\n \"\"\"\n\n # FIXME (Ole): Return static style to start with: ticket #144\n style = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<sld:StyledLayerDescriptor xmlns=\"http://www.opengis.net/sld\" xmlns:sld=\"http://www.opengis.net/sld\" xmlns:ogc=\"http://www.opengis.net/ogc\" xmlns:gml=\"http://www.opengis.net/gml\" version=\"1.0.0\">\n <sld:NamedLayer>\n <sld:Name>earthquake_impact</sld:Name>\n <sld:UserStyle>\n <sld:Name>earthquake_impact</sld:Name>\n <sld:Title/>\n <sld:FeatureTypeStyle>\n <sld:Name>name</sld:Name>\n <sld:Rule>\n <sld:Name>1</sld:Name>\n <sld:Title>Low</sld:Title>\n <ogc:Filter>\n <ogc:PropertyIsLessThan>\n <ogc:PropertyName>DMGLEVEL</ogc:PropertyName>\n <ogc:Literal>1.5</ogc:Literal>\n </ogc:PropertyIsLessThan>\n </ogc:Filter>\n <sld:PolygonSymbolizer>\n <sld:Fill>\n <sld:CssParameter name=\"fill\">#1EFC7C</sld:CssParameter>\n </sld:Fill>\n <sld:Stroke>\n <sld:CssParameter name=\"stroke\">#0EEC6C</sld:CssParameter>\n </sld:Stroke>\n </sld:PolygonSymbolizer>\n </sld:Rule>\n <sld:Rule>\n <sld:Name>2</sld:Name>\n <sld:Title>Medium</sld:Title>\n <ogc:Filter>\n <ogc:And>\n <ogc:PropertyIsGreaterThanOrEqualTo>\n <ogc:PropertyName>DMGLEVEL</ogc:PropertyName>\n <ogc:Literal>1.5</ogc:Literal>\n </ogc:PropertyIsGreaterThanOrEqualTo>\n <ogc:PropertyIsLessThan>\n <ogc:PropertyName>DMGLEVEL</ogc:PropertyName>\n <ogc:Literal>2.5</ogc:Literal>\n </ogc:PropertyIsLessThan>\n </ogc:And>\n </ogc:Filter>\n <sld:PolygonSymbolizer>\n <sld:Fill>\n <sld:CssParameter name=\"fill\">#FD8D3C</sld:CssParameter>\n </sld:Fill>\n <sld:Stroke>\n <sld:CssParameter name=\"stroke\">#ED7D2C</sld:CssParameter>\n </sld:Stroke>\n </sld:PolygonSymbolizer>\n </sld:Rule>\n <sld:Rule>\n <sld:Name>3</sld:Name>\n <sld:Title>High</sld:Title>\n <ogc:Filter>\n <ogc:PropertyIsGreaterThanOrEqualTo>\n <ogc:PropertyName>DMGLEVEL</ogc:PropertyName>\n <ogc:Literal>2.5</ogc:Literal>\n </ogc:PropertyIsGreaterThanOrEqualTo>\n </ogc:Filter>\n <sld:PolygonSymbolizer>\n <sld:Fill>\n <sld:CssParameter name=\"fill\">#F31A1C</sld:CssParameter>\n </sld:Fill>\n <sld:Stroke>\n <sld:CssParameter name=\"stroke\">#E30A0C</sld:CssParameter>\n </sld:Stroke>\n </sld:PolygonSymbolizer>\n </sld:Rule>\n </sld:FeatureTypeStyle>\n </sld:UserStyle>\n </sld:NamedLayer>\n</sld:StyledLayerDescriptor>\n\"\"\"\n\n return style\n\n def Xgenerate_style(self, data):\n \"\"\"Generates a point SLD file based on the data values\n \"\"\"\n\n # Define default behaviour to be used when\n # - symbol attribute is missing\n # - attribute value is None or ''\n DEFAULT_SYMBOL = 'circle'\n\n symbol_field = None\n\n # FIXME: Replace these by dict and extend below\n symbol_keys = [None, '']\n symbol_values = [DEFAULT_SYMBOL, DEFAULT_SYMBOL]\n\n # Predefined scales and corresponding font sizes\n scale_keys = [10000000000, 10000000, 5000000,\n 1000000, 
500000, 250000, 100000]\n scale_values = [3, 5, 8, 12, 14, 16, 18]\n\n # Predefined colour classes\n class_keys = [_('Low damage'), _('Medium damage'), _('High damage')]\n class_values = [{'min': 0.5, 'max': 1.5,\n 'color': '#0efc7c', 'opacity': '1'},\n {'min': 1.5, 'max': 2.5,\n 'color': '#fded0c', 'opacity': '1'},\n {'min': 2.5, 'max': 3.5,\n 'color': '#e31a1c', 'opacity': '1'}]\n\n symbols = {None: DEFAULT_SYMBOL, '': DEFAULT_SYMBOL}\n\n # Generate sld style file\n params = dict(name=data.get_name(),\n damage_field=self.target_field,\n symbol_field=symbol_field,\n symbols=symbols,\n scales=dict(zip(scale_keys, scale_values)),\n classifications=dict(zip(class_keys, class_values)))\n\n # The styles are in $RIAB_HOME/riab/impact/templates/impact/styles\n return render_to_string('impact/styles/point_classes.sld', params)\n" }, { "alpha_fraction": 0.411557137966156, "alphanum_fraction": 0.41626232862472534, "avg_line_length": 31.54067039489746, "blob_id": "f78eb8d13a84a061eef9b889300da091044ff63a", "content_id": "cacf72b2409032af083a733ae98c9d757eb60170", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 6801, "license_type": "no_license", "max_line_length": 93, "num_lines": 209, "path": "/calculator/app/static/script/app/Risiko.js", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "/**\n * Copyright (c) 2009-2011 The Open Planning Project\n */\n\n// define gettext in case we run standalone\nif (!window.gettext) { gettext = function(s) { return s; }; }\n\n/** api: constructor\n * .. class:: Risiko(config)\n *\n * Risiko application.\n */\nvar Risiko = Ext.extend(gxp.Viewer, {\n \n /* @i18n begin */\n layersText: gettext(\"Layers\"),\n legendText: gettext(\"Legend\"),\n /* @i18n end */\n \n /** private: method[constructor]\n * :arg config: ``Object``\n */\n constructor: function(config) {\n\n config = Ext.applyIf(config || {}, {\n \n proxy: \"/proxy?url=\",\n \n portalConfig: {\n layout: \"border\",\n region: \"center\",\n\n // by configuring items here, we don't need to configure portalItems\n // and save a wrapping container\n items: [{\n id: \"centerpanel\",\n xtype: \"panel\",\n layout: \"fit\",\n region: \"center\",\n border: false,\n items: [\"map\"]\n }, {\n id: \"westpanel\",\n xtype: \"tabpanel\",\n region: \"west\",\n split: true,\n collapsible: true,\n hideCollapseTool: true,\n collapseMode: \"mini\",\n width: 200,\n defaults: { autoScroll: true },\n listeners: {\n \"add\": {\n fn: function(cmp) { cmp.setActiveTab(0); },\n single: true\n }\n }\n }, {\n id: \"east\",\n region: \"east\",\n width: 350,\n split: true,\n collapsible: true,\n collapseMode: \"mini\",\n header: false,\n border: true,\n layout: \"vbox\",\n layoutConfig: {\n align: 'stretch',\n pack: 'start',\n defaultMargins: \"2 2 0 2\"\n },\n defaults: {\n padding: 10\n }\n }]\n },\n\n // configuration of all tool plugins for this application\n tools: [{\n ptype: \"gxp_layertree\",\n outputConfig: {\n id: \"tree\",\n title: this.layersText,\n border: true,\n tbar: [] // we will add buttons to \"tree.tbar\" later\n },\n outputTarget: \"westpanel\"\n }, {\n ptype: \"gxp_legend\",\n outputTarget: \"westpanel\",\n outputConfig: {\n title: this.legendText\n }\n }, {\n ptype: \"gxp_addlayers\",\n actionTarget: \"tree.tbar\"\n }, {\n ptype: \"gxp_zoomtolayerextent\",\n actionTarget: \"tree.contextMenu\"\n }, {\n ptype: \"gxp_removelayer\",\n actionTarget: [\"tree.tbar\", \"tree.contextMenu\"]\n }, {\n ptype: \"gxp_zoomtoextent\",\n actionTarget: 
\"map.tbar\"\n }, {\n ptype: \"gxp_zoom\",\n actionTarget: \"map.tbar\"\n }, {\n ptype: \"gxp_navigationhistory\",\n actionTarget: \"map.tbar\"\n }, {\n actions: [\"-\"]\n }, {\n ptype: \"gxp_wmsgetfeatureinfo\",\n format: \"grid\",\n controlOptions: {maxFeatures: 3},\n outputConfig: {\n maximizable: true\n },\n actionTarget: \"map.tbar\"\n }, {\n ptype: \"gxp_layerproperties\",\n layerPanelConfig: {\n \"gxp_wmslayerpanel\": {\n styling: false\n }\n },\n actionTarget: [\"tree.tbar\", \"tree.contextMenu\"]\n }, {\n ptype: \"gxp_styler\",\n requireDescribeLayer: false,\n actionTarget: [\"tree.tbar\", \"tree.contextMenu\"]\n }, {\n ptype: \"app_calculator\",\n outputTarget: \"east\"\n }],\n\n // map items\n mapItems: [{\n xtype: \"gx_zoomslider\",\n vertical: true,\n height: 100\n }],\n \n // layer sources\n sources: {\n mapbox: {\n ptype: \"gxp_mapboxsource\"\n },\n mapquest: {\n ptype: \"gxp_mapquestsource\"\n }\n }, \n \n map: {\n id: \"map\"\n }\n });\n \n Risiko.superclass.constructor.apply(this, [config]);\n },\n\n /** private: method[loadConfig]\n * :arg config: ``Object``\n * :arc callback: ``Function``\n */\n loadConfig: function(config, callback) {\n Ext.Ajax.request({\n url: \"/maps/new/data\",\n success: function(response) {\n //TODO remove the replace call below when\n // https://github.com/AIFDR/riab/issues/112 is fixed\n var json = response.responseText.replace(/gxp_wmscsource/g, \"gxp_wmssource\");\n var loadedConfig = Ext.decode(json, true);\n Ext.applyIf(loadedConfig.map, config.map);\n Ext.apply(config, loadedConfig);\n callback.call(this, config);\n },\n scope: this\n });\n \n // assume that we're not authorized to do anything (especially style\n // editing), until we get note from /data/acls that we're not anonymous\n this.isAuthorized = OpenLayers.Function.False;\n Ext.Ajax.request({\n url: \"/data/acls\",\n success: function(response) {\n var acls = Ext.decode(response.responseText, true);\n if (acls.is_anonymous === false) {\n this.isAuthorized = OpenLayers.Function.True;\n }\n },\n scope: this\n });\n }\n});\n\n(function() {\n // making symbol names available in UPPERCASE, because Risiko slds use\n // uppercase symbol names.\n //TODO: check if uppercase symbol names are valid according by the spec.\n // If so, make this change in OpenLayers. 
If not, change it in Risiko.\n var symbol = OpenLayers.Renderer.symbol;\n for (var s in symbol) {\n symbol[s.toUpperCase()] = symbol[s];\n }\n})();\n" }, { "alpha_fraction": 0.5036841034889221, "alphanum_fraction": 0.5317715406417847, "avg_line_length": 37.541263580322266, "blob_id": "8ef53fe32205543de4eee80084595c997af98c08", "content_id": "9c141b6425dc1686410e44a1be2b057ec2efde4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15879, "license_type": "no_license", "max_line_length": 79, "num_lines": 412, "path": "/impact/tests/test_plugins.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "import unittest\nfrom impact import plugins\n\nimport numpy\nimport sys\nimport os\nimport unittest\nimport warnings\n\nfrom geonode.maps.utils import upload, file_upload, GeoNodeException\n\nfrom impact.views import calculate\nfrom impact.plugins.core import FunctionProvider\nfrom impact.plugins.core import requirements_collect\nfrom impact.plugins.core import requirement_check\nfrom impact.plugins.core import get_plugins\nfrom impact.plugins.core import compatible_layers\n\nfrom impact.storage.io import get_layer_descriptors\n\nfrom impact.models import Calculation, Workspace\n\nfrom impact.storage.io import save_to_geonode, check_layer\nfrom impact.storage.io import download\nfrom impact.storage.io import read_layer\nfrom impact.tests.utilities import TESTDATA\nfrom django.test.client import Client\nfrom django.conf import settings\nfrom django.utils import simplejson as json\nfrom geonode.maps.utils import get_valid_user\n\nfrom impact.tests.utilities import TESTDATA, INTERNAL_SERVER_URL\n\nDEFAULT_PLUGINS = ('Earthquake Fatality Function',)\n\n\n# FIXME (Ole): Change H, E to layers.\nclass BasicFunction(FunctionProvider):\n \"\"\"Risk plugin for testing\n\n :author Allen\n :rating 1\n :param requires category==\"hazard\"\n \"\"\"\n\n @staticmethod\n def run(H, E,\n a=0.97429, b=11.037):\n\n return None\n\n\ndef padang_check_results(mmi, building_class):\n \"\"\"Check calculated results through a lookup table\n returns False if the lookup fails and\n an exception if more than one lookup returned\"\"\"\n\n # Reference table established from plugin as of 28 July 2011\n # It was then manually verified against an Excel table by Abbie Baca\n # and Ted Dunstone. Format is\n # MMI, Building class, impact [%]\n padang_verified_results = [\n [7.50352, 1, 50.17018],\n [7.49936, 1, 49.96942],\n [7.63961, 2, 20.35277],\n [7.09855, 2, 5.895076],\n [7.49990, 3, 7.307292],\n [7.80284, 3, 13.71306],\n [7.66337, 4, 3.320895],\n [7.12665, 4, 0.050489],\n [7.12665, 5, 1.013092],\n [7.85400, 5, 7.521769],\n [7.54040, 6, 4.657564],\n [7.48122, 6, 4.167858],\n [7.31694, 6, 3.008460],\n [7.54057, 7, 1.349811],\n [7.12753, 7, 0.177422],\n [7.61912, 7, 1.866942],\n [7.64828, 8, 1.518264],\n [7.43644, 8, 0.513577],\n [7.12665, 8, 0.075070],\n [7.64828, 9, 1.731623],\n [7.48122, 9, 1.191497],\n [7.12665, 9, 0.488944]]\n\n impact_array = [verified_impact\n for verified_mmi, verified_building_class, verified_impact\n in padang_verified_results\n if numpy.allclose(verified_mmi, mmi, rtol=1.0e-6) and\n numpy.allclose(verified_building_class, building_class,\n rtol=1.0e-6)]\n\n if len(impact_array) == 0:\n return False\n elif len(impact_array) == 1:\n return impact_array[0]\n\n msg = 'More than one lookup result returned. 
May be precision error.'\n assert len(impact_array) < 2, msg\n\n # FIXME (Ole): Count how many buildings were damaged in each category?\n\n\nclass Test_plugins(unittest.TestCase):\n \"\"\"Tests of Risiko calculations\n \"\"\"\n\n def setUp(self):\n \"\"\"Create valid superuser\n \"\"\"\n self.user = get_valid_user()\n\n def test_get_plugins(self):\n \"\"\"It is possible to retrieve the list of functions\n \"\"\"\n plugin_list = plugins.get_plugins()\n msg = ('No plugins were found, not even the built-in ones')\n assert len(plugin_list) > 0, msg\n\n def test_single_get_plugins(self):\n \"\"\"Named plugin can be retrieved\n \"\"\"\n plugin_name = DEFAULT_PLUGINS[0]\n plugin_list = plugins.get_plugins(plugin_name)\n msg = ('No plugins were found matching %s' % plugin_name)\n assert len(plugin_list) > 0, msg\n\n def test_get_plugins(self):\n \"\"\"Plugins can be collected\n \"\"\"\n\n plugin_list = get_plugins()\n assert(len(plugin_list) > 0)\n\n # Check that every plugin has a requires line\n for plugin in plugin_list.values():\n requirements = requirements_collect(plugin)\n msg = 'There were no requirements in plugin %s' % plugin\n assert(len(requirements) > 0), msg\n\n for req_str in requirements:\n msg = 'All plugins should return True or False'\n assert(requirement_check({'category': 'hazard',\n 'subcategory': 'earthquake',\n 'layerType': 'raster'},\n req_str) in [True, False]), msg\n\n def test_requirements_check(self):\n \"\"\"Plugins are correctly filtered based on requirements\"\"\"\n\n plugin_list = get_plugins('BasicFunction')\n assert(len(plugin_list) == 1)\n\n requirements = requirements_collect(plugin_list[0].values()[0])\n msg = 'Requirements are %s' % requirements\n assert(len(requirements) == 1), msg\n for req_str in requirements:\n msg = 'Should eval to True'\n assert(requirement_check({'category': 'hazard'},\n req_str) is True), msg\n msg = 'Should eval to False'\n assert(requirement_check({'broke': 'broke'},\n req_str) is False), msg\n\n try:\n plugin_list = get_plugins('NotRegistered')\n except AssertionError:\n pass\n else:\n msg = 'Search should fail'\n raise Exception(msg)\n\n def test_plugin_compatibility(self):\n \"\"\"Default plugins perform as expected\n \"\"\"\n\n # Upload a raster and a vector data set\n hazard_filename = os.path.join(TESTDATA,\n 'shakemap_padang_20090930.asc')\n hazard_layer = save_to_geonode(hazard_filename)\n check_layer(hazard_layer, full=True)\n\n exposure_filename = os.path.join(TESTDATA,\n 'lembang_schools.shp')\n exposure_layer = save_to_geonode(exposure_filename)\n check_layer(exposure_layer, full=True)\n\n # Test\n plugin_list = get_plugins()\n assert len(plugin_list) > 0\n\n geoserver = {'url': settings.GEOSERVER_BASE_URL + 'ows',\n 'name': 'Local Geoserver',\n 'version': '1.0.0',\n 'id': 0}\n metadata = get_layer_descriptors(geoserver['url'])\n\n msg = 'There were no layers in test geoserver'\n assert len(metadata) > 0, msg\n\n # Characterisation test to preserve the behaviour of\n # get_layer_descriptors. 
FIXME: I think we should change this to be\n # a dictionary of metadata entries (ticket #126).\n reference = [['geonode:lembang_schools',\n {'layer_type': 'vector',\n 'category': 'exposure',\n 'subcategory': 'building',\n 'title': 'lembang_schools'}],\n ['geonode:shakemap_padang_20090930',\n {'layer_type': 'raster',\n 'category': 'hazard',\n 'subcategory': 'earthquake',\n 'title': 'shakemap_padang_20090930'}]]\n\n for entry in reference:\n name, mdblock = entry\n\n i = [x[0] for x in metadata].index(name)\n\n msg = 'Got name %s, expected %s' % (name, metadata[i][0])\n assert name == metadata[i][0], msg\n for key in entry[1]:\n refval = entry[1][key]\n val = metadata[i][1][key]\n msg = ('Got value \"%s\" for key \"%s\" '\n 'Expected \"%s\"' % (val, key, refval))\n assert refval == val, msg\n\n # Check plugins are returned\n annotated_plugins = [{'name': name,\n 'doc': f.__doc__,\n 'layers': compatible_layers(f, metadata)}\n for name, f in plugin_list.items()]\n\n msg = 'No compatible layers returned'\n assert len(annotated_plugins) > 0, msg\n\n def test_django_plugins(self):\n \"\"\"Django plugin functions can be retrieved correctly\n \"\"\"\n\n c = Client()\n rv = c.post('/impact/api/functions/', data={})\n\n self.assertEqual(rv.status_code, 200)\n self.assertEqual(rv['Content-Type'], 'application/json')\n data = json.loads(rv.content)\n\n def test_plugin_selection(self):\n \"\"\"Verify the plugins can recognize compatible layers.\n \"\"\"\n # Upload a raster and a vector data set\n hazard_filename = os.path.join(TESTDATA,\n 'Earthquake_Ground_Shaking.asc')\n hazard_layer = save_to_geonode(hazard_filename,\n user=self.user,\n overwrite=True)\n check_layer(hazard_layer, full=True)\n\n msg = 'No keywords found in layer %s' % hazard_layer.name\n assert len(hazard_layer.keywords) > 0, msg\n\n exposure_filename = os.path.join(TESTDATA,\n 'lembang_schools.shp')\n exposure_layer = save_to_geonode(exposure_filename)\n check_layer(exposure_layer, full=True)\n msg = 'No keywords found in layer %s' % exposure_layer.name\n assert len(exposure_layer.keywords) > 0, msg\n\n c = Client()\n rv = c.post('/impact/api/functions/', data={})\n\n self.assertEqual(rv.status_code, 200)\n self.assertEqual(rv['Content-Type'], 'application/json')\n data = json.loads(rv.content)\n\n assert 'functions' in data\n\n functions = data['functions']\n\n # FIXME (Ariel): This test should implement an alternative function to\n # parse the requirements, but for now it will just take the buildings\n # damage one.\n for function in functions:\n if function['name'] == 'Earthquake Building Damage Function':\n layers = function['layers']\n\n msg_tmpl = 'Expected layer %s in list of compatible layers: %s'\n\n hazard_msg = msg_tmpl % (hazard_layer.typename, layers)\n assert hazard_layer.typename in layers, hazard_msg\n\n exposure_msg = msg_tmpl % (exposure_layer.typename, layers)\n assert exposure_layer.typename in layers, exposure_msg\n\n def test_padang_building_examples(self):\n \"\"\"Padang building impact calculation works through the API\n \"\"\"\n\n # Test for a range of hazard layers\n for mmi_filename in ['Shakemap_Padang_2009.asc']:\n #'Lembang_Earthquake_Scenario.asc']:\n\n # Upload input data\n hazardfile = os.path.join(TESTDATA, mmi_filename)\n hazard_layer = save_to_geonode(hazardfile, user=self.user)\n hazard_name = '%s:%s' % (hazard_layer.workspace,\n hazard_layer.name)\n\n exposurefile = os.path.join(TESTDATA, 'Padang_WGS84.shp')\n exposure_layer = save_to_geonode(exposurefile, user=self.user)\n exposure_name = 
'%s:%s' % (exposure_layer.workspace,\n exposure_layer.name)\n\n # Call calculation routine\n\n # FIXME (Ole): The system freaks out if there are spaces in\n # bbox string. Please let us catch that and deal\n # nicely with it - also do this in download()\n bbox = '96.956, -5.51, 104.63933, 2.289497'\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n\n c = Client()\n rv = c.post('/impact/api/calculate/', data=dict(\n hazard_server=INTERNAL_SERVER_URL,\n hazard=hazard_name,\n exposure_server=INTERNAL_SERVER_URL,\n exposure=exposure_name,\n bbox=bbox,\n impact_function='Padang Earthquake ' \\\n 'Building Damage Function',\n keywords='test,buildings,padang',\n ))\n\n self.assertEqual(rv.status_code, 200)\n self.assertEqual(rv['Content-Type'], 'application/json')\n data = json.loads(rv.content)\n assert 'hazard_layer' in data.keys()\n assert 'exposure_layer' in data.keys()\n assert 'run_duration' in data.keys()\n assert 'run_date' in data.keys()\n assert 'layer' in data.keys()\n\n # Download result and check\n layer_name = data['layer'].split('/')[-1]\n\n result_layer = download(INTERNAL_SERVER_URL,\n layer_name,\n bbox)\n assert os.path.exists(result_layer.filename)\n\n # Read hazard data for reference\n hazard_raster = read_layer(hazardfile)\n A = hazard_raster.get_data()\n mmi_min, mmi_max = hazard_raster.get_extrema()\n\n # Read calculated result\n impact_vector = read_layer(result_layer.filename)\n coordinates = impact_vector.get_geometry()\n attributes = impact_vector.get_data()\n\n # Verify calculated result\n count = 0\n verified_count = 0\n for i in range(len(attributes)):\n lon, lat = coordinates[i][:]\n calculated_mmi = attributes[i]['MMI']\n\n if calculated_mmi == 0.0:\n # FIXME (Ole): Some points have MMI==0 here.\n # Weird but not a show stopper\n continue\n\n # Check that interpolated points are within range\n msg = ('Interpolated mmi %f was outside extrema: '\n '[%f, %f] at location '\n '[%f, %f]. ' % (calculated_mmi,\n mmi_min, mmi_max,\n lon, lat))\n assert mmi_min <= calculated_mmi <= mmi_max, msg\n\n building_class = attributes[i]['TestBLDGCl']\n\n # Check calculated damage\n calculated_dam = attributes[i]['DAMAGE']\n verified_dam = padang_check_results(calculated_mmi,\n building_class)\n #print calculated_mmi, building_class, calculated_dam\n if verified_dam:\n msg = ('Calculated damage was not as expected '\n 'for hazard layer %s. I got %f '\n 'but expected %f' % (hazardfile,\n calculated_dam,\n verified_dam))\n assert numpy.allclose(calculated_dam, verified_dam,\n rtol=1.0e-4), msg\n verified_count += 1\n count += 1\n\n msg = ('No points was verified in output. 
Please create '\n 'table withe reference data')\n assert verified_count > 0, msg\n msg = 'Number buildings was not 3896.'\n assert count == 3896, msg\n\nif __name__ == '__main__':\n os.environ['DJANGO_SETTINGS_MODULE'] = 'risiko.settings'\n suite = unittest.makeSuite(Test_plugins, 'test')\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite)\n" }, { "alpha_fraction": 0.5543453097343445, "alphanum_fraction": 0.576593279838562, "avg_line_length": 38.227272033691406, "blob_id": "aa11677fba8b8a75ce9ef4383dd23008363e58c6", "content_id": "c80d7e08f28afe9c18fb6f951e9fb2e256bc28a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4315, "license_type": "no_license", "max_line_length": 195, "num_lines": 110, "path": "/impact/plugins/tsunami/tsunami_population_impact.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "import numpy\n\nfrom impact.plugins.core import FunctionProvider\nfrom impact.storage.raster import Raster\nfrom django.utils.translation import ugettext as _\n\n\nclass TsunamiPopulationImpactFunction(FunctionProvider):\n \"\"\"Risk plugin for tsunami impact on population data\n\n :param requires category==\"hazard\" and \\\n subcategory.startswith(\"tsunami\") and \\\n layer_type==\"raster\"\n :param requires category==\"exposure\" and \\\n subcategory.startswith(\"population\") and \\\n layer_type==\"raster\"\n\n \"\"\"\n\n def run(self, layers):\n \"\"\"Risk plugin for tsunami population\n \"\"\"\n\n thresholds = [0.2, 0.3, 0.5, 0.8, 1.0]\n #threshold = 1 # Depth above which people are regarded affected [m]\n\n # Identify hazard and exposure layers\n inundation = layers[0] # Tsunami inundation [m]\n population = layers[1] # Population density\n\n # Extract data as numeric arrays\n D = inundation.get_data(nan=0.0) # Depth\n P = population.get_data(nan=0.0, scaling=True) # Population density\n\n # Calculate impact as population exposed to depths > 1m\n I_map = numpy.where(D > thresholds[-1], P, 0)\n\n # Generate text with result for this study\n number_of_people_affected = numpy.nansum(I_map.flat)\n\n # Do breakdown\n\n # Create report\n caption = ('<table border=\"0\" width=\"320px\">'\n ' <tr><th><b>%s</b></th><th><b>%s</b></th></th>'\n ' <tr></tr>' % ('Ambang batas', 'Jumlah orang terdampak'))\n\n counts = []\n for i, threshold in enumerate(thresholds):\n I = numpy.where(D > threshold, P, 0)\n counts.append(numpy.nansum(I.flat))\n\n caption += ' <tr><td>%s m</td><td>%i</td></tr>' % (threshold,\n counts[i])\n\n caption += '</table>'\n\n # Create raster object and return\n R = Raster(I_map,\n projection=inundation.get_projection(),\n geotransform=inundation.get_geotransform(),\n name='People affected by more than 1m of inundation',\n keywords={'caption': caption})\n return R\n\n def generate_style(self, data):\n \"\"\"Generates and SLD file based on the data values\n \"\"\"\n\n s = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<sld:StyledLayerDescriptor xmlns=\"http://www.opengis.net/sld\" xmlns:sld=\"http://www.opengis.net/sld\" xmlns:ogc=\"http://www.opengis.net/ogc\" xmlns:gml=\"http://www.opengis.net/gml\" version=\"1.0.0\">\n <sld:NamedLayer>\n <sld:Name>People affected by more than 1m of inundation</sld:Name>\n <sld:UserStyle>\n <sld:Name>People affected by more than 1m of inundation</sld:Name>\n <sld:Title>People Affected By More Than 1m Of Inundation</sld:Title>\n <sld:Abstract>People Affected By More Than 1m Of Inundation</sld:Abstract>\n <sld:FeatureTypeStyle>\n <sld:Name>People 
affected by more than 1m of inundation</sld:Name>\n <sld:Rule>\n <sld:RasterSymbolizer>\n <sld:Geometry>\n <ogc:PropertyName>geom</ogc:PropertyName>\n </sld:Geometry>\n <sld:ChannelSelection>\n <sld:GrayChannel>\n <sld:SourceChannelName>1</sld:SourceChannelName>\n </sld:GrayChannel>\n </sld:ChannelSelection>\n <sld:ColorMap>\n <sld:ColorMapEntry color=\"#ffffff\" opacity=\"0\" quantity=\"-9999.0\"/>\n <sld:ColorMapEntry color=\"#38A800\" opacity=\"0\" quantity=\"0.01\"/>\n <sld:ColorMapEntry color=\"#38A800\" quantity=\"0.02\"/>\n <sld:ColorMapEntry color=\"#79C900\" quantity=\"0.05\"/>\n <sld:ColorMapEntry color=\"#CEED00\" quantity=\"0.1\"/>\n <sld:ColorMapEntry color=\"#FFCC00\" quantity=\"0.2\"/>\n <sld:ColorMapEntry color=\"#FF6600\" quantity=\"0.3\"/>\n <sld:ColorMapEntry color=\"#FF0000\" quantity=\"0.5\"/>\n <sld:ColorMapEntry color=\"#7A0000\" quantity=\"0.9\"/>\n </sld:ColorMap>\n </sld:RasterSymbolizer>\n </sld:Rule>\n </sld:FeatureTypeStyle>\n </sld:UserStyle>\n </sld:NamedLayer>\n</sld:StyledLayerDescriptor>\n\n \"\"\"\n\n return s\n" }, { "alpha_fraction": 0.5502594113349915, "alphanum_fraction": 0.5995460152626038, "avg_line_length": 26.783782958984375, "blob_id": "b99162e7bcb3a38d29a6ec38ae335778df255d5f", "content_id": "88cb542e2776ca96785855761338abafc0fbee15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3084, "license_type": "no_license", "max_line_length": 78, "num_lines": 111, "path": "/impact/tests/utilities.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "import os\nimport time\nimport types\nimport numpy\nfrom django.conf import settings\nfrom impact.storage.io import download, get_bounding_box, get_metadata\n\nTESTDATA = os.path.join(os.environ['RIAB_HOME'], 'risiko_test_data')\n\n# Use the local GeoServer url inside GeoNode\n# The ows bit at the end if VERY important because\n# that is the endpoint of the OGC services.\nINTERNAL_SERVER_URL = os.path.join(settings.GEOSERVER_BASE_URL, 'ows')\n\n# Known feature counts in test data\nFEATURE_COUNTS = {'lembang_schools.shp': 144,\n 'tsunami_exposure_BB.shp': 7529,\n 'kecamatan_geo.shp': 42,\n 'Padang_WGS84.shp': 3896,\n 'OSM_building_polygons_20110905.shp': 34960,\n 'OSM_subset.shp': 79}\n\n# For testing\nGEOTRANSFORMS = [(105.3000035, 0.008333, 0.0, -5.5667785, 0.0, -0.008333),\n (105.29857, 0.0112, 0.0, -5.565233000000001, 0.0, -0.0112),\n (96.956, 0.03074106, 0.0, 2.2894972560001, 0.0, -0.03074106)]\n\n\ndef _same_API(X, Y, exclude=None):\n \"\"\"Check that public methods of X also exist in Y\n \"\"\"\n\n if exclude is None:\n exclude = []\n\n for name in dir(X):\n\n # Skip internal symbols\n if name.startswith('_'):\n continue\n\n # Skip explicitly excluded methods\n if name in exclude:\n continue\n\n # Check membership of methods\n attr = getattr(X, name)\n if isinstance(attr, types.MethodType):\n if name not in dir(Y):\n msg = 'Method %s of %s was not found in %s' % (name, X, Y)\n raise Exception(msg)\n\n\ndef same_API(X, Y, exclude=None):\n \"\"\"Check that public methods of X and Y are the same.\n\n Input\n X, Y: Python objects\n exclude: List of names to exclude from comparison or None\n \"\"\"\n\n _same_API(X, Y, exclude=exclude)\n _same_API(Y, X, exclude=exclude)\n\n return True\n\n\ndef get_web_page(url, username=None, password=None):\n \"\"\"Get url page possible with username and password.\n \"\"\"\n import urllib2\n\n if username is not None:\n\n # Create password manager\n passman = urllib2.HTTPPasswordMgrWithDefaultRealm()\n 
passman.add_password(None, url, username, password)\n\n # create the handler\n authhandler = urllib2.HTTPBasicAuthHandler(passman)\n opener = urllib2.build_opener(authhandler)\n urllib2.install_opener(opener)\n\n try:\n pagehandle = urllib2.urlopen(url)\n except HTTPError, e:\n msg = ('The server couldn\\'t fulfill the request. '\n 'Error code: ' % e.code)\n e.args = (msg,)\n raise\n except urllib2.URLError, e:\n msg = 'Could not open URL \"%s\": %s' % (url, e)\n e.args = (msg,)\n raise\n else:\n page = pagehandle.readlines()\n\n return page\n\n\ndef combine_coordinates(x, y):\n \"\"\"Make list of all combinations of points for x and y coordinates\n \"\"\"\n\n points = []\n for px in x:\n for py in y:\n points.append((px, py))\n points = numpy.array(points)\n\n return points\n" }, { "alpha_fraction": 0.7028627991676331, "alphanum_fraction": 0.7166830897331238, "avg_line_length": 35.14285659790039, "blob_id": "d76d25de02d4b8e3f761649615a36f60c40389fd", "content_id": "b3f8dc1fc94e8fca951ba3f2a4457a265833a2f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1013, "license_type": "no_license", "max_line_length": 118, "num_lines": 28, "path": "/docs/usage/faq.rst", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "Frequently Asked Questions\n==========================\n\n\n\nHow do I rename a shape file and all the helper files?\n::\n Use the rename command. rename [ -v ] [ -n ] [ -f ] perlexpr [ files ].\n For example\n rename -v 's/^building/OSM_building_polygons_20110905/' building.*\n\n\nMy Risiko production server is live but no map shows?\n::\n Try to login and restart tomcat: sudo /etc/init.d/tomcat6 restart\n\nHow do I get Open Street Map building data?\n::\n For Indonesia, you can download latest collections at http://data.kompetisiosm.org\n\nHow do I take screen capture e.g. for use in a presentation?\n::\n On Ubuntu, get the packages gtk-recordmydesktop and mencoder\n Record using recordmydesktop (start and stop icon in the top bar)\n Convert to other formats using mencoder, e.g.\n mencoder -idx yogya_analysis-6.ogv -ovc lavc -oac lavc -lavcopts vcodec=mpeg4:vpass=1 -of lavf -o yogya_analysis.avi\n or\n mencoder -idx yogya_analysis-6.ogv -ovc lavc -oac lavc -lavcopts vcodec=wmv2 -of lavf -o yogya_analysis.wmv\n\n" }, { "alpha_fraction": 0.48402783274650574, "alphanum_fraction": 0.49044370651245117, "avg_line_length": 34.00472640991211, "blob_id": "ceb6095d3318f41fd134d7e4231f6b4c58ecf576", "content_id": "9961f21ffa3e83821bd4f9b9522a6395529bc596", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 14807, "license_type": "no_license", "max_line_length": 177, "num_lines": 423, "path": "/calculator/app/static/script/app/Risiko/Calculator.js", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "/**\n * Copyright (c) 2009-2011 The Open Planning Project\n */\n\n/*\n * @requires Risiko.js\n */\n\n/** api: constructor\n * .. 
class:: Risiko.Calculator\n *\n * Risiko Calculator plugin.\n */\nRisiko.Calculator = Ext.extend(gxp.plugins.Tool, {\n\n ptype: \"app_calculator\",\n\n /* @i18n begin */\n hazardComboLabelText: gettext(\"Hazard\"),\n exposureComboLabelText: gettext(\"Exposure\"),\n functionComboLabelText: gettext(\"Function\"),\n resetButtonText: gettext(\"Reset\"),\n calculateButtonText: gettext(\"Calculate\"),\n calculatingText: gettext(\"Calculating\"),\n calculatorTitleText: gettext(\"Impact Calculator\"),\n hazardSelectText: gettext(\"Select Hazard ...\"),\n exposureSelectText: gettext(\"Select Exposure ...\"),\n functionSelectText: gettext(\"Select Function ...\"),\n /* @i18n end */\n\n addOutput: function(config) {\n\n var exposurestore, hazardstore, combo_functionstore, popup,\n functionstore, bboxLayer,\n app = this.target,\n lastHazardSelect = \"None\",\n lastExposureSelect = \"None\",\n lastImpactSelect = \"None\",\n lastImpactLayer = \"None\";\n\n function drawBox(bbox) {\n var map = app.mapPanel.map;\n\n if (bboxLayer) {\n bboxLayer.destroy();\n bboxLayer = null;\n }\n bboxLayer = new OpenLayers.Layer.Vector(\"Calculation Extent\", {\n styleMap: new OpenLayers.StyleMap({\n strokeColor: \"#000000\",\n strokeOpacity: 0.2,\n strokeWidth: 3,\n fillColor: \"#00FF00\",\n fillOpacity: 0\n })\n });\n\n var feature = new OpenLayers.Feature.Vector(bbox.toGeometry());\n bboxLayer.addFeatures([feature]);\n\n map.addLayer(bboxLayer);\n };\n\n exposurestore = new Ext.data.JsonStore({\n id: 'exposurestore',\n fields: ['name', 'title', 'server_url'],\n autoLoad: true,\n url: '/impact/api/layers/?category=exposure',\n root: 'objects'\n });\n\n hazardstore = new Ext.data.JsonStore({\n id: 'hazardstore',\n fields: ['name', 'title', 'server_url'],\n autoLoad: true,\n url: '/impact/api/layers/?category=hazard',\n root: 'objects'\n });\n\n combo_functionstore = new Ext.data.JsonStore({\n id: 'combo_functionstore',\n fields: ['name', 'doc', 'layers'],\n root: 'functions'\n });\n\n function addLayer(server_url, label, layer_name, opacity_value, callback) {\n var record = app.createLayerRecord({\n name: layer_name,\n title: label,\n opacity: opacity_value,\n source: \"0\"\n }, function(rec) {\n var layer = rec.getLayer();\n rec.getLayer().attribution = \"My attribution\";\n app.mapPanel.layers.add(rec);\n if (callback) {\n callback(rec);\n }\n });\n }\n\n function createPopup(feature) {\n \tvar content = \"<div style='font-size:.9em; width:270px;'><b>\" + feature.attributes.name + \"</b><hr />\" + \"</div>\";\n \tpopup = new GeoExt.Popup({\n \t\ttitle: 'Details',\n \t\tfeature: feature,\n \t\twidth:270,\n \t\theight:170,\n \t\thtml: content,\n \t\tcollapsible: true\n \t});\n \tpopup.on({\n \t\tclose: function() {\n \t\t\tif(OpenLayers.Util.indexOf(vecLayer.selectedFeatures, this.feature) > -1) {\n \t\t\t\tselectControl.unselect(this.feature);\n \t\t\t}\n \t\t}\n \t});\n \tpopup.show();\n }\n\n\n function removeLayer(layer_name){\n var map = app.mapPanel.map;\n var layers = map.getLayersByName(layer_name);\n if (layers.length > 0) {\n \t//for each(var lay in layers){\n \t\tmap.removeLayer(layers[0]);\n \t// }\n }\n }\n\n function addLayerFromCombo(combo){\n var layer_name = combo.value;\n var id = combo.store.find('name', combo.value,0,true,false);\n var item = combo.store.data.items[id];\n addLayer(item.data.server_url, layer_name, layer_name, 0.5);\n }\n\n function hazardSelected(combo){\n removeLayer(lastHazardSelect);\n addLayerFromCombo(combo);\n Ext.getCmp('exposurecombo').enable();\n 
Ext.getCmp('functioncombo').disable();\n lastHazardSelect = combo.getValue();\n }\n\n // Need function store separate from the function combo box\n // since the combo box is rebuilt depending on the selection\n functionstore = new Ext.data.JsonStore({\n id: 'functionstore',\n fields: ['name', 'doc', 'layers'],\n autoLoad: true,\n url: '/impact/api/functions/',\n root: 'functions'\n });\n\n function reset_view() {\n var exposure = Ext.getCmp('exposurecombo');\n var hazard = Ext.getCmp('hazardcombo');\n\n removeLayer(exposure.getValue());\n removeLayer(hazard.getValue());\n removeLayer(lastImpactLayer);\n if (bboxLayer && bboxLayer.map) {\n app.mapPanel.map.removeLayer(bboxLayer);\n }\n lastImpactSelect = \"None\";\n lastExposureSelect = \"None\";\n lastHazardSelect = \"None\";\n exposure.setValue(\"\");\n hazard.setValue(\"\");\n exposure.disable();\n Ext.getCmp('functioncombo').disable();\n Ext.getCmp('functioncombo').setValue(\"\");\n Ext.getCmp('resultpanel').getEl().update('');\n }\n\n function exposureSelected(combo){\n addLayerFromCombo(combo);\n // Get the complete list of functions and it's compatible layers\n var fCombo = Ext.getCmp('functioncombo');\n\n var hazard_name = Ext.getCmp('hazardcombo').value;\n var exposure_name = Ext.getCmp('exposurecombo').value;\n\n removeLayer(lastExposureSelect);\n lastExposureSelect = exposure_name;\n\n Ext.getCmp('functioncombo').enable();\n var items = functionstore.data.items;\n\n // Clear the function combobox\n fCombo.store.removeAll();\n fCombo.store.totalLength = 0;\n\n for (var ii=0; ii<items.length; ii++) {\n \tvar item = items[ii];\n \tif (item.data == undefined){\n continue;\n }\n var name = item.data.name;\n var layers = item.data.layers;\n var found_exposure = false;\n var found_hazard = false;\n // Find if hazard is in layers\n for (var li=0; li<layers.length; li++) {\n \t var lay=layers[li];\n if (lay == exposure_name) {\n found_exposure = true;\n }\n if (lay == hazard_name) {\n found_hazard = true;\n }\n }\n\n if (found_exposure && found_hazard) {\n \t // add the function name to the combo box\n \t fCombo.store.insert(0, new Ext.data.Record({name:name}));\n \t fCombo.setValue(name);\n }\n }\n\n }\n\n function showCaption(caption){\n var output = '<div>' + caption + '</div>';\n var resultPanel = Ext.getCmp('resultpanel').getEl().update(output);\n }\n\n function received(result, request) {\n var progressbar = Ext.getCmp('calculateprogress');\n progressbar.reset();\n progressbar.hide();\n\n var data = Ext.decode( result.responseText );\n if (data.errors !== null){\n Ext.MessageBox.alert('Calculation Failed:', data.errors);\n return;\n }\n reset_view();\n removeLayer(lastImpactLayer);\n var layer_uri = data.layer;\n var run_date = data.run_date;\n var run_duration = data.run_duration;\n var bbox = data.bbox;\n var caption = data.caption;\n var excel = data.excel;\n var exposure = data.exposure_layer;\n var hazard = data.hazard_layer;\n var base_url = layer_uri.split('/')[2];\n var server_url = data.ows_server_url;\n var result_name = layer_uri.split('/')[4].split(':')[1];\n var result_label = exposure + ' X ' + hazard + '=' +result_name;\n app.layerSources[\"0\"].store.reload({\n callback: function() {\n addLayer(server_url, result_label, \"geonode:\"+result_name, 0.9, function(rec) {\n drawBox(rec.getLayer().maxExtent);\n });\n lastImpactLayer = result_label;\n var layer_link = '<a target=\"_blank\" href=\"'+ layer_uri + '\">Hasil peta</a><br><br>';\n var excel_link = '';\n if (excel !== undefined) {\n excel_link = '<a href=\"'+ excel 
+ '\">Hasil table</a>';\n };\n showCaption(caption + '<br><br>' + layer_link + excel_link);\n }\n });\n }\n\n function calculate() {\n var hazardcombo = Ext.getCmp('hazardcombo');\n var exposurecombo = Ext.getCmp('exposurecombo');\n var hazardid = hazardcombo.store.find('name', hazardcombo.value,0,true,false);\n var exposureid = exposurecombo.store.find('name', exposurecombo.value,0,true,false);\n var hazarditem = hazardcombo.store.data.items[hazardid];\n var exposureitem = exposurecombo.store.data.items[exposureid];\n\n var hazard_layer = hazarditem.data.name;\n var exposure_layer = exposureitem.data.name;\n var hazard_server = hazarditem.data.server_url;\n var exposure_server = exposureitem.data.server_url;\n\n var impact_function = Ext.getCmp('functioncombo').getValue();\n var progressbar = Ext.getCmp('calculateprogress');\n\n var map = app.mapPanel.map;\n var bounds_original = map.getExtent();\n var bounds = bounds_original.transform(\n new OpenLayers.Projection('EPSG:900913'), new OpenLayers.Projection('EPSG:4326')\n );\n var bbox = bounds.toBBOX();\n progressbar.show();\n progressbar.wait({\n interval: 100,\n duration: 50000,\n \t increment: 5\n \t});\n\n Ext.Ajax.request({\n url: '/impact/api/calculate/',\n loadMask: true,\n params: {\n hazard_server: hazard_server,\n hazard: hazard_layer,\n exposure_server: hazard_server,\n exposure: exposure_layer,\n bbox: bbox,\n keywords: 'test,riab_client',\n impact_function: impact_function\n },\n method: 'POST',\n timeout: 1200000, // 20 minutes\n success: received,\n failure: function ( result, request) {\n progressbar.hide();\n progressbar.reset();\n Ext.MessageBox.alert('Failed', result.responseText);\n }\n });\n }\n\n return Risiko.Calculator.superclass.addOutput.apply(this, [[{\n id: \"calcform\",\n title: this.calculatorTitleText,\n xtype: 'form',\n labelWidth: 80,\n frame: true,\n height: 200,\n border: false,\n items: [{\n xtype: 'combo',\n id: 'hazardcombo',\n store: hazardstore,\n displayField: 'title',\n valueField: 'name',\n fieldLabel: this.hazardComboLabelText,\n typeAhead: true,\n mode: 'local',\n triggerAction: 'all',\n emptyText: this.hazardSelectText,\n selectOnFocus:false,\n listeners: {\n \"select\": hazardSelected\n }\n }, {\n xtype: 'combo',\n id: 'exposurecombo',\n store: exposurestore,\n displayField: 'title',\n valueField: 'name',\n fieldLabel: this.exposureComboLabelText,\n typeAhead: true,\n mode: 'local',\n triggerAction: 'all',\n emptyText: this.exposureSelectText,\n selectOnFocus:false,\n disabled: true,\n listeners: {\n \"select\": exposureSelected\n }\n }, {\n xtype: 'combo',\n id: 'functioncombo',\n store: combo_functionstore,\n displayField: 'name',\n valueField: 'name',\n fieldLabel: this.functionComboLabelText,\n typeAhead: true,\n mode: 'local',\n triggerAction: 'all',\n disabled: true,\n emptyText: this.functionSelectText,\n selectOnFocus:false\n }, {\n xtype: 'progress',\n id: 'calculateprogress',\n\t\t cls: 'right-align',\n displayField:'name',\n\t\t fieldLabel: this.calculatingText,\n hidden: true\n\t }],\n\t\t buttons: [{\n\t\t text: this.resetButtonText,\n\t\t\t handler: reset_view\n\t\t\t}, {\n\t\t\t\ttext: this.calculateButtonText,\n handler: calculate\n\t\t\t}]\n }, {\n id: \"resultpanelcontainer\",\n title: 'Kalkulasi Hasil',\n flex: 1,\n frame: true,\n border: true,\n autoScroll: true,\n items: [{\n id: \"resultpanel\",\n html: \"\"\n }],\n xtype: \"panel\",\n defaults: {\n hideBorders: true\n }\n }, {\n id: \"logopanel\",\n height: 180,\n frame: false,\n border: false,\n html: \"<div><p>\"+\n 
\"<a href='http://bnpb.go.id' target='_blank'><img src='theme/app/img/bnpb_logo.png' alt='BNPB' title='BNPB' style='padding-left: 100px; float: left' /></a>\"+\n \"</p></div>\",\n xtype: \"panel\",\n defaults: {\n hideBorders: false\n }\n }]]);\n }\n\n});\n\n\nExt.preg(Risiko.Calculator.prototype.ptype, Risiko.Calculator);\n" }, { "alpha_fraction": 0.3600628972053528, "alphanum_fraction": 0.38979417085647583, "avg_line_length": 41.138553619384766, "blob_id": "7c479fcfcbccb4c0fde9697b721e0855911e66f0", "content_id": "c96adacb76c08c05a20a33f1966df80386175a62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6996, "license_type": "no_license", "max_line_length": 75, "num_lines": 166, "path": "/impact/tests/plugins/NEXIS_building_impact_model.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "\nimport numpy\nimport scipy\n\nfrom django.template.loader import render_to_string\nfrom impact.plugins.core import FunctionProvider\n\nfrom impact.engine.utilities import MAXFLOAT\nfrom impact.plugins.utilities import Damage_curve\nfrom impact.storage.vector import Vector\n\n#------------------------------------------------------------\n# Define damage curves for tsunami structural building damage\n#------------------------------------------------------------\nstruct_damage_curve = {'Double brick': Damage_curve([[-MAXFLOAT, 0.0],\n [0.0, 0.016],\n [0.1, 0.150],\n [0.3, 0.425],\n [0.5, 0.449],\n [1.0, 0.572],\n [1.5, 0.582],\n [2.0, 0.587],\n [2.5, 0.647],\n [MAXFLOAT, 64.7]]),\n 'Brick veneer': Damage_curve([[-MAXFLOAT, 0.0],\n [0.0, 0.016],\n [0.1, 0.169],\n [0.3, 0.445],\n [0.5, 0.472],\n [1.0, 0.618],\n [1.5, 0.629],\n [2.0, 0.633],\n [2.5, 0.694],\n [MAXFLOAT, 69.4]]),\n 'Timber': Damage_curve([[-MAXFLOAT, 0.0],\n [0.0, 0.016],\n [0.3, 0.645],\n [1.0, 0.818],\n [2.0, 0.955],\n [MAXFLOAT, 99.4]])}\n\ncontents_damage_curve = Damage_curve([[-MAXFLOAT, 0.0],\n [0.0, 0.013],\n [0.1, 0.102],\n [0.3, 0.381],\n [0.5, 0.500],\n [1.0, 0.970],\n [1.5, 0.976],\n [2.0, 0.986],\n [MAXFLOAT, 98.6]])\n\n\nclass TsunamiBuildingLossFunction(FunctionProvider):\n \"\"\"Risk plugin for earthquake damage based on empirical results\n\n :param requires category=='hazard' and \\\n subcategory.startswith('tsunami') and \\\n layer_type=='raster'\n :param requires category=='exposure' and \\\n subcategory.startswith('building') and \\\n layer_type=='vector' and \\\n nothing=='never'\n \"\"\"\n\n @staticmethod\n def target_value():\n field = 'STRUCT_DAMAGE_fraction'\n return field[0:9]\n\n @staticmethod\n def run(layers):\n \"\"\"Risk plugin for tsunami building damage\n \"\"\"\n\n # Extract data\n # FIXME (Ole): This will be replaced by a helper function\n # to separate hazard from exposure using keywords\n H = layers[0] # Ground shaking\n E = layers[1] # Building locations\n\n # Interpolate hazard level to building locations\n H = H.interpolate(E)\n\n # Extract relevant numerical data\n coordinates = E.get_geometry()\n inundation = H.get_data()\n\n # Calculate\n N = len(H)\n impact = []\n for i in range(N):\n\n #-------------------\n # Extract parameters\n #-------------------\n depth = float(inundation[i].values()[0])\n shore_distance = E.get_data('SHORE_DIST', i)\n\n # FIXME: Get rid of the type casting when\n # issue #66 is done\n number_of_people_in_building = int(E.get_data('NEXIS_PEOP', i))\n wall_type = E.get_data('WALL_TYPE', i)\n contents_value = E.get_data('CONT_VALUE', i)\n structure_value = E.get_data('STR_VALUE', i)\n\n 
#------------------------\n # Compute people affected\n #------------------------\n if 0.01 < depth < 1.0:\n people_affected = number_of_people_in_building\n else:\n people_affected = 0\n\n if depth >= 1.0:\n people_severely_affected = number_of_people_in_building\n else:\n people_severely_affected = 0\n\n #----------------------------------------\n # Compute impact on buldings and contents\n #----------------------------------------\n depth_floor = depth - 0.3 # Adjust for floor height\n\n if depth_floor >= 0.0:\n buildings_inundated = 1\n else:\n buildings_inundated = 0\n\n if depth_floor < 0.0:\n structural_damage = contents_damage = 0.0\n else:\n # Water is deep enough to cause damage\n if wall_type in struct_damage_curve:\n curve = struct_damage_curve[wall_type]\n else:\n # Establish default for unknown wall type\n curve = struct_damage_curve['Brick veneer']\n\n structural_damage = curve(depth_floor)\n contents_damage = contents_damage_curve(depth_floor)\n\n #---------------\n # Compute losses\n #---------------\n structural_loss = structural_damage * structure_value\n contents_loss = contents_damage * contents_value\n\n #-------\n # Return\n #-------\n impact.append({'NEXIS_PEOP': number_of_people_in_building,\n 'PEOPLE_AFFECTED': people_affected,\n 'PEOPLE_SEV_AFFECTED': people_severely_affected,\n 'STRUCT_INUNDATED': buildings_inundated,\n 'STRUCT_DAMAGE_fraction': structural_damage,\n 'CONTENTS_DAMAGE_fraction': contents_damage,\n 'STRUCT_LOSS_AUD': structural_loss,\n 'CONTENTS_LOSS_AUD': contents_loss,\n 'DEPTH': depth})\n\n # FIXME (Ole): Need helper to generate new layer using\n # correct spatial reference\n # (i.e. sensibly wrap the following lines)\n V = Vector(data=impact, projection=E.get_projection(),\n geometry=coordinates,\n name='Estimated tsunami impact')\n return V\n" }, { "alpha_fraction": 0.5830930471420288, "alphanum_fraction": 0.5890366435050964, "avg_line_length": 33.92908477783203, "blob_id": "3eb085065f1c1f1f2c77356c561725c1fb3031ef", "content_id": "03608422e1b220331b9893b40bed9f3e3921709b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 33986, "license_type": "no_license", "max_line_length": 79, "num_lines": 973, "path": "/impact/storage/io.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "\"\"\"IO module for reading and writing of files\n\n This module provides functionality to read and write\n raster and vector layers from numerical data.\n\"\"\"\n\nimport os\nimport time\nimport numpy\nimport urllib2\nimport tempfile\nimport contextlib\nfrom zipfile import ZipFile\n\nfrom impact.storage.vector import Vector\nfrom impact.storage.raster import Raster\nfrom impact.storage.utilities import is_sequence\nfrom impact.storage.utilities import LAYER_TYPES\nfrom impact.storage.utilities import WCS_TEMPLATE\nfrom impact.storage.utilities import WFS_TEMPLATE\nfrom impact.storage.utilities import unique_filename\nfrom impact.storage.utilities import write_keywords\nfrom impact.storage.utilities import extract_geotransform\nfrom impact.storage.utilities import geotransform2resolution\n\nfrom owslib.wcs import WebCoverageService\nfrom owslib.wfs import WebFeatureService\n\nfrom geonode.maps.utils import file_upload, GeoNodeException\nfrom django.conf import settings\n\nimport logging\nlogger = logging.getLogger('risiko')\n\nINTERNAL_SERVER_URL = os.path.join(settings.GEOSERVER_BASE_URL, 'ows')\n\n\ndef read_layer(filename):\n \"\"\"Read spatial layer from file.\n This can be either raster or vector 
data.\n \"\"\"\n\n _, ext = os.path.splitext(filename)\n if ext in ['.asc', '.tif']:\n return Raster(filename)\n elif ext in ['.shp', '.gml']:\n return Vector(filename)\n else:\n msg = ('Could not read %s. '\n 'Extension \"%s\" has not been implemented' % (filename, ext))\n raise Exception(msg)\n\n\ndef write_raster_data(data, projection, geotransform, filename, keywords=None):\n \"\"\"Write array to raster file with specified metadata and one data layer\n\n Input:\n data: Numpy array containing grid data\n projection: WKT projection information\n geotransform: 6 digit vector\n (top left x, w-e pixel resolution, rotation,\n top left y, rotation, n-s pixel resolution).\n See e.g. http://www.gdal.org/gdal_tutorial.html\n filename: Output filename\n keywords: Optional dictionary\n\n Note: The only format implemented is GTiff and the extension must be .tif\n \"\"\"\n\n R = Raster(data, projection, geotransform, keywords=keywords)\n R.write_to_file(filename)\n\n\ndef write_vector_data(data, projection, geometry, filename, keywords=None):\n \"\"\"Write point data and any associated attributes to vector file\n\n Input:\n data: List of N dictionaries each with M fields where\n M is the number of attributes.\n A value of None is acceptable.\n projection: WKT projection information\n geometry: List of points or polygons.\n filename: Output filename\n keywords: Optional dictionary\n\n Note: The only format implemented is GML and SHP so the extension\n must be either .gml or .shp\n\n # FIXME (Ole): When the GML driver is used,\n # the spatial reference is not stored.\n # I suspect this is a bug in OGR.\n\n Background:\n * http://www.gdal.org/ogr/ogr_apitut.html (last example)\n * http://invisibleroads.com/tutorials/gdal-shapefile-points-save.html\n \"\"\"\n\n V = Vector(data, projection, geometry, keywords=keywords)\n V.write_to_file(filename)\n\n\ndef get_bounding_box(filename):\n \"\"\"Get bounding box for specified raster or vector file\n\n Input:\n filename\n\n Output:\n bounding box as python list of numbers [West, South, East, North]\n \"\"\"\n\n layer = read_layer(filename)\n return layer.get_bounding_box()\n\n\ndef bboxlist2string(bbox, decimals=6):\n \"\"\"Convert bounding box list to comma separated string\n\n Input\n bbox: List of coordinates of the form [W, S, E, N]\n Output\n bbox_string: Format 'W,S,E,N' - each will have 6 decimal points\n \"\"\"\n\n msg = 'Got string %s, but expected bounding box as a list' % str(bbox)\n assert not isinstance(bbox, basestring), msg\n\n try:\n bbox = list(bbox)\n except:\n msg = 'Could not coerce bbox %s into a list' % str(bbox)\n raise Exception(msg)\n\n msg = ('Bounding box must have 4 coordinates [W, S, E, N]. '\n 'I got %s' % str(bbox))\n assert len(bbox) == 4, msg\n\n for x in bbox:\n try:\n float(x)\n except ValueError, e:\n msg = ('Bounding box %s contained non-numeric entry %s, '\n 'original error was \"%s\".' % (bbox, x, e))\n raise AssertionError(msg)\n\n # Make template of the form '%.5f,%.5f,%.5f,%.5f'\n template = (('%%.%if,' % decimals) * 4)[:-1]\n\n # Assign numbers and return\n return template % tuple(bbox)\n\n\ndef bboxstring2list(bbox_string):\n \"\"\"Convert bounding box string to list\n\n Input\n bbox_string: String of bounding box coordinates of the form 'W,S,E,N'\n Output\n bbox: List of floating point numbers with format [W, S, E, N]\n \"\"\"\n\n msg = ('Bounding box must be a string with coordinates following the '\n 'format 105.592,-7.809,110.159,-5.647\\n'\n 'Instead I got %s of type %s.' 
% (str(bbox_string),\n type(bbox_string)))\n assert isinstance(bbox_string, basestring), msg\n\n fields = bbox_string.split(',')\n msg = ('Bounding box string must have 4 coordinates in the form '\n '\"W,S,E,N\". I got bbox == \"%s\"' % bbox_string)\n assert len(fields) == 4, msg\n\n for x in fields:\n try:\n float(x)\n except ValueError, e:\n msg = ('Bounding box %s contained non-numeric entry %s, '\n 'original error was \"%s\".' % (bbox_string, x, e))\n raise AssertionError(msg)\n\n return [float(x) for x in fields]\n\n\ndef get_bounding_box_string(filename):\n \"\"\"Get bounding box for specified raster or vector file\n\n Input:\n filename\n\n Output:\n bounding box as python string 'West, South, East, North'\n \"\"\"\n\n return bboxlist2string(get_bounding_box(filename))\n\n\ndef get_geotransform(server_url, layer_name):\n \"\"\"Constructs the geotransform based on the WCS service.\n\n Should only be called be rasters / WCS layers.\n\n Returns:\n geotransform is a vector of six numbers:\n\n (top left x, w-e pixel resolution, rotation,\n top left y, rotation, n-s pixel resolution).\n\n We should (at least) use elements 0, 1, 3, 5\n to uniquely determine if rasters are aligned\n\n \"\"\"\n\n metadata = get_metadata(server_url, layer_name)\n return metadata['geotransform']\n\n\ndef get_metadata_from_layer(layer):\n \"\"\"Get ows metadata from one layer\n\n Input\n layer: Layer object. It is assumed that it has the extra attribute\n data_type which is either raster or vector\n \"\"\"\n\n # Create empty metadata dictionary\n metadata = {}\n\n # Metadata specific to layer types\n metadata['layer_type'] = layer.datatype\n if layer.datatype == 'raster':\n geotransform = extract_geotransform(layer)\n metadata['geotransform'] = geotransform\n metadata['resolution'] = geotransform2resolution(geotransform,\n # Get both resx\n # and resy\n isotropic=False)\n else:\n metadata['resolution'] = None\n metadata['geotransform'] = None\n\n # Metadata common to both raster and vector data\n metadata['bounding_box'] = layer.boundingBoxWGS84\n metadata['title'] = layer.title # This maybe overwritten by keyword\n metadata['id'] = layer.id\n\n # Extract keywords\n keyword_dict = {}\n if not hasattr(layer, 'keywords'):\n msg = 'No keywords in %s. Submit patch to OWSLib maintainers.' % layer\n raise Exception(msg)\n else:\n for keyword in layer.keywords:\n if keyword is not None:\n for keyword_string in keyword.split(','):\n if ':' in keyword_string:\n key, value = keyword_string.strip().split(':')\n keyword_dict[key] = value\n else:\n keyword_dict[keyword_string] = None\n\n # Add resolution (as a string with one element) so that layer \"remembers\"\n # its original resolution,\n # FIXME (Ole): What is the best way of collapsing 2D resolution to\n # one number - resx and resy are not always identical!\n if metadata['resolution'] is not None:\n keyword_dict['resolution'] = str(metadata['resolution'][0])\n\n # FIXME (Ole): The statement below does not raise an Exception,\n # and nothing is written to the log file. See issue #170\n #raise Exception('weird')\n\n # Record all keywords as part of the metadata and return\n metadata['keywords'] = keyword_dict\n return metadata\n\n\ndef get_metadata(server_url, layer_name=None):\n \"\"\"Uses OWSLib to get the metadata for a given layer\n\n Input\n server_url: e.g. 
http://localhost:8001/geoserver-geonode-dev/ows\n layer_name: Name of layer - must follow the convention workspace:name\n If None metadata for all layers will be returned as a\n dictionary with one entry per layer\n\n Output\n metadata: Dictionary of metadata fields for specified layer or,\n if layer_name is None, a dictionary of metadata dictionaries\n \"\"\"\n\n # Get all metadata from server\n wcs = WebCoverageService(server_url, version='1.0.0')\n wfs = WebFeatureService(server_url, version='1.0.0')\n\n # Take care of input options\n if layer_name is None:\n layer_names = wcs.contents.keys() + wfs.contents.keys()\n else:\n layer_names = [layer_name]\n\n # Get metadata for requested layer(s)\n metadata = {}\n for name in layer_names:\n if name in wcs.contents:\n layer = wcs.contents[name]\n layer.datatype = 'raster' # Monkey patch layer type\n elif name in wfs.contents:\n layer = wfs.contents[name]\n layer.datatype = 'vector' # Monkey patch layer type\n else:\n msg = ('Layer %s was not found in WxS contents on server %s.\\n'\n 'WCS contents: %s\\n'\n 'WFS contents: %s\\n' % (name, server_url,\n wcs.contents, wfs.contents))\n raise Exception(msg)\n\n metadata[name] = get_metadata_from_layer(layer)\n\n # Return metadata for one or all layers\n if layer_name is not None:\n return metadata[layer_name]\n else:\n return metadata\n\n\ndef get_layer_descriptors(url):\n \"\"\"Get layer information for use with the plugin system\n\n The keywords are parsed and added to the metadata dictionary\n if they conform to the format \"identifier:value\".\n\n NOTE: Keywords will overwrite metadata with same keys. A notable\n example is title which is currently in use.\n\n Input\n url: The wfs url\n version: The version of the wfs xml expected\n\n Output\n A list of (lists of) dictionaries containing the metadata for\n each layer of the following form:\n\n [['geonode:lembang_schools',\n {'layer_type': 'vector',\n 'category': 'exposure',\n 'subcategory': 'building',\n 'title': 'lembang_schools'}],\n ['geonode:shakemap_padang_20090930',\n {'layer_type': 'raster',\n 'category': 'hazard',\n 'subcategory': 'earthquake',\n 'title': 'shakemap_padang_20090930'}]]\n\n \"\"\"\n\n # FIXME (Ole): I don't like the format, but it permeates right\n # through to the HTTPResponses in views.py, so\n # I am not sure if it can be changed. My problem is\n #\n # 1: A dictionary of metadata entries would be simpler\n # 2: The keywords should have their own dictionary to avoid\n # danger of keywords overwriting other metadata\n #\n # I have raised this in ticket #126\n\n # Get all metadata from owslib\n metadata = get_metadata(url)\n\n # Create exactly the same structure that was produced by the now obsolete\n # get_layers_metadata. 
FIXME: However, this is subject to issue #126\n x = []\n for key in metadata:\n # Get all metadata\n md = metadata[key]\n\n # Create new special purpose entry\n block = {}\n block['layer_type'] = md['layer_type']\n block['title'] = md['title']\n\n # Copy keyword data into this block possibly overwriting data\n for kw in md['keywords']:\n block[kw] = md['keywords'][kw]\n\n x.append([key, block])\n\n return x\n\n\ndef get_file(download_url, suffix):\n \"\"\"Download a file from an HTTP server.\n \"\"\"\n\n tempdir = '/tmp/%s' % str(time.time())\n os.mkdir(tempdir)\n t = tempfile.NamedTemporaryFile(delete=False,\n suffix=suffix,\n dir=tempdir)\n\n with contextlib.closing(urllib2.urlopen(download_url)) as f:\n data = f.read()\n\n if '<ServiceException>' in data:\n msg = ('File download failed.\\n'\n 'URL: %s\\n'\n 'Error message: %s' % (download_url, data))\n raise Exception(msg)\n\n # Write and return filename\n t.write(data)\n filename = os.path.abspath(t.name)\n return filename\n\n\ndef check_bbox_string(bbox_string):\n \"\"\"Check that bbox string is valid\n \"\"\"\n\n msg = 'Expected bbox as a string with format \"W,S,E,N\"'\n assert isinstance(bbox_string, basestring), msg\n\n # Use checks from string to list conversion\n minx, miny, maxx, maxy = bboxstring2list(bbox_string)\n\n # Check semantic integrity\n msg = ('Western border %.5f of bounding box %s was out of range '\n 'for longitudes ([-180:180])' % (minx, bbox_string))\n assert -180 <= minx <= 180, msg\n\n msg = ('Eastern border %.5f of bounding box %s was out of range '\n 'for longitudes ([-180:180])' % (maxx, bbox_string))\n assert -180 <= maxx <= 180, msg\n\n msg = ('Southern border %.5f of bounding box %s was out of range '\n 'for latitudes ([-90:90])' % (miny, bbox_string))\n assert -90 <= miny <= 90, msg\n\n msg = ('Northern border %.5f of bounding box %s was out of range '\n 'for latitudes ([-90:90])' % (maxy, bbox_string))\n assert -90 <= maxy <= 90, msg\n\n msg = ('Western border %.5f was greater than or equal to eastern border '\n '%.5f of bounding box %s' % (minx, maxx, bbox_string))\n assert minx < maxx, msg\n\n msg = ('Southern border %.5f was greater than or equal to northern border '\n '%.5f of bounding box %s' % (miny, maxy, bbox_string))\n assert miny < maxy, msg\n\n\ndef download(server_url, layer_name, bbox, resolution=None):\n \"\"\"Download the source data of a given layer.\n\n Input\n server_url: String such as 'http://www.aifdr.org:8080/geoserver/ows'\n layer_name: Layer identifier of the form workspace:name,\n e.g 'geonode:Earthquake_Ground_Shaking'\n bbox: Bounding box for layer. This can either be a string or a list\n with format [west, south, east, north], e.g.\n '87.998242,-8.269822,117.046094,5.097895'\n resolution: Optional argument specifying resolution in case of\n raster layers.\n Resolution can be a tuple (resx, resy) signifying the\n spacing in decimal degrees in the longitude, latitude\n direction respectively.\n If resolution is just one number it is used for both resx\n and resy.\n If resolution is None, the 'native' resolution of\n the dataset is used.\n\n Layer geometry type must be either 'vector' or 'raster'\n \"\"\"\n\n # Input checks\n assert isinstance(server_url, basestring)\n try:\n urllib2.urlopen(server_url)\n except Exception, e:\n msg = ('Argument server_url doesn\\'t appear to be a valid URL'\n 'I got %s. Error message was: %s' % (server_url, str(e)))\n raise Exception(msg)\n\n msg = ('Expected layer_name to be a basestring. 
'\n 'Instead got %s which is of type %s' % (layer_name,\n type(layer_name)))\n assert isinstance(layer_name, basestring), msg\n\n msg = ('Argument layer name must have the form'\n 'workspace:name. I got %s' % layer_name)\n assert len(layer_name.split(':')) == 2, msg\n\n if isinstance(bbox, list) or isinstance(bbox, tuple):\n bbox_string = bboxlist2string(bbox)\n elif isinstance(bbox, basestring):\n # Remove spaces if any (GeoServer freaks if string has spaces)\n bbox_string = ','.join([x.strip() for x in bbox.split(',')])\n else:\n msg = ('Bounding box must be a string or a list of coordinates with '\n 'format [west, south, east, north]. I got %s' % str(bbox))\n raise Exception(msg)\n\n # Check integrity of bounding box\n check_bbox_string(bbox_string)\n\n # Check resolution\n if resolution is not None:\n\n # Make sure it is a list or a tuple\n if not is_sequence(resolution):\n # Replicate single value twice\n resolution = (resolution, resolution)\n\n # Check length\n msg = ('Specified resolution must be either a number or a 2-tuple. '\n 'I got %s' % str(resolution))\n assert len(resolution) == 2, msg\n\n # Check floating point\n for res in resolution:\n try:\n float(res)\n except ValueError, e:\n msg = ('Expecting number for resolution, but got %s: %s'\n % (res, str(e)))\n raise RisikoException(msg)\n\n # Create REST request and download file\n template = None\n layer_metadata = get_metadata(server_url, layer_name)\n\n data_type = layer_metadata['layer_type']\n if data_type == 'vector':\n\n if resolution is not None:\n msg = ('Resolution was requested for Vector layer %s. '\n 'This can only be done for raster layers.' % layer_name)\n raise RisikoException(msg)\n\n template = WFS_TEMPLATE\n suffix = '.zip'\n download_url = template % (server_url, layer_name, bbox_string)\n thefilename = get_file(download_url, suffix)\n dirname = os.path.dirname(thefilename)\n t = open(thefilename, 'r')\n zf = ZipFile(t)\n namelist = zf.namelist()\n zf.extractall(path=dirname)\n (shpname,) = [name for name in namelist if '.shp' in name]\n filename = os.path.join(dirname, shpname)\n elif data_type == 'raster':\n\n if resolution is None:\n # Get native resolution and use that\n resolution = layer_metadata['resolution']\n #resolution = (resolution, resolution) #FIXME (Ole): Make nicer\n\n # Download raster using specified bounding box and resolution\n template = WCS_TEMPLATE\n suffix = '.tif'\n download_url = template % (server_url, layer_name, bbox_string,\n resolution[0], resolution[1])\n filename = get_file(download_url, suffix)\n\n # Write keywords file\n keywords = layer_metadata['keywords']\n write_keywords(keywords, os.path.splitext(filename)[0] + '.keywords')\n\n # Instantiate layer from file\n lyr = read_layer(filename)\n\n # FIXME (Ariel) Don't monkeypatch the layer object\n lyr.metadata = layer_metadata\n return lyr\n\n\ndef dummy_save(filename, title, user, metadata=''):\n \"\"\"Take a file-like object and uploads it to a GeoNode\n \"\"\"\n return 'http://dummy/data/geonode:' + filename + '_by_' + user.username\n\n\n#--------------------------------------------------------------------\n# Functionality to upload layers to GeoNode and check their integrity\n#--------------------------------------------------------------------\n\nclass RisikoException(Exception):\n pass\n\n\ndef console_log():\n \"\"\"Reconfigure logging to output to the console.\n \"\"\"\n\n for _module in [\"risiko\"]:\n _logger = logging.getLogger(_module)\n _logger.addHandler(logging.StreamHandler())\n 
_logger.setLevel(logging.INFO)\n\n\ndef run(cmd, stdout=None, stderr=None):\n \"\"\"Run command with stdout and stderr optionally redirected\n\n The logfiles are only kept in case the command fails.\n \"\"\"\n\n # Build command\n msg = 'Argument cmd must be a string. I got %s' % cmd\n assert isinstance(cmd, basestring), msg\n\n s = cmd\n if stdout is not None:\n msg = 'Argument stdout must be a string or None. I got %s' % stdout\n assert isinstance(stdout, basestring), msg\n s += ' > %s' % stdout\n\n if stderr is not None:\n msg = 'Argument stderr must be a string or None. I got %s' % stdout\n assert isinstance(stderr, basestring), msg\n s += ' 2> %s' % stderr\n\n # Run command\n err = os.system(s)\n\n if err != 0:\n msg = 'Command \"%s\" failed with errorcode %i. ' % (cmd, err)\n if stdout:\n msg += 'See logfile %s for stdout details' % stdout\n if stderr is not None:\n msg += 'See logfile %s for stderr details' % stderr\n raise Exception(msg)\n else:\n # Clean up\n if stdout is not None:\n os.remove(stdout)\n if stderr is not None:\n os.remove(stderr)\n\n\ndef assert_bounding_box_matches(layer, filename):\n \"\"\"Verify that GeoNode layer has the same bounding box as filename\n \"\"\"\n\n # Check integrity\n assert hasattr(layer, 'geographic_bounding_box')\n assert isinstance(layer.geographic_bounding_box, basestring)\n\n # Exctract bounding bounding box from layer handle\n s = 'POLYGON(('\n i = layer.geographic_bounding_box.find(s) + len(s)\n assert i > len(s)\n\n j = layer.geographic_bounding_box.find('))')\n assert j > i\n\n bbox_string = str(layer.geographic_bounding_box[i:j])\n A = numpy.array([[float(x[0]), float(x[1])] for x in\n (p.split() for p in bbox_string.split(','))])\n south = min(A[:, 1])\n north = max(A[:, 1])\n west = min(A[:, 0])\n east = max(A[:, 0])\n bbox = [west, south, east, north]\n\n # Check correctness of bounding box against reference\n ref_bbox = get_bounding_box(filename)\n\n msg = ('Bounding box from layer handle \"%s\" was not as expected.\\n'\n 'Got %s, expected %s' % (layer.name, bbox, ref_bbox))\n assert numpy.allclose(bbox, ref_bbox, rtol=1.0e-6, atol=1.0e-8), msg\n\n\ndef check_layer(layer, full=False):\n \"\"\"Verify if an object is a valid Layer.\n\n If check fails an exception is raised.\n\n Input\n layer: Layer object\n full: Optional flag controlling whether layer is to be downloaded\n as part of the check.\n \"\"\"\n\n from geonode.maps.models import Layer\n\n msg = ('Was expecting layer object, got None')\n assert layer is not None, msg\n msg = ('Was expecting layer object, got %s' % (type(layer)))\n assert type(layer) is Layer, msg\n msg = ('The layer does not have a valid name: %s' % layer.name)\n assert len(layer.name) > 0, msg\n msg = ('The layer does not have a valid workspace: %s' % layer.workspace)\n assert len(layer.workspace) > 0, msg\n\n # Get layer metadata\n layer_name = '%s:%s' % (layer.workspace, layer.name)\n metadata = get_metadata(INTERNAL_SERVER_URL, layer_name)\n #try:\n # metadata = get_metadata(INTERNAL_SERVER_URL, layer_name)\n #except:\n # # Convert any exception to AssertionError for use in retry loop in\n # # save_file_to_geonode.\n # raise AssertionError\n\n assert 'id' in metadata\n assert 'title' in metadata\n assert 'layer_type' in metadata\n assert 'keywords' in metadata\n assert 'bounding_box' in metadata\n\n # Get bounding box and download\n bbox = metadata['bounding_box']\n assert len(bbox) == 4\n\n if full:\n # Check that layer can be downloaded again\n downloaded_layer = download(INTERNAL_SERVER_URL, 
layer_name, bbox)\n assert os.path.exists(downloaded_layer.filename)\n\n # Check integrity between Django layer and file\n assert_bounding_box_matches(layer, downloaded_layer.filename)\n\n # Read layer and verify\n L = read_layer(downloaded_layer.filename)\n\n # Could do more here\n #print dir(L)\n #print L.keywords #FIXME(Ole): I don't think keywords are downloaded!\n #print metadata['keywords']\n\n\ndef save_file_to_geonode(filename, user=None, title=None,\n overwrite=True, check_metadata=True,\n ignore=None):\n \"\"\"Save a single layer file to local Risiko GeoNode\n\n Input\n filename: Layer filename of type as defined in LAYER_TYPES\n user: Django User object\n title: String describing the layer.\n If None or '' the filename will be used.\n overwrite: Boolean variable controlling whether existing layers\n can be overwritten by this operation. Default is True\n check_metadata: Flag controlling whether metadata is verified.\n If True (default), an exception will be raised\n if metada is not available after a number of retries.\n If False, no check is done making the function faster.\n Output\n layer object\n \"\"\"\n\n if ignore is not None and filename == ignore:\n return None\n\n # Extract fully qualified basename and extension\n basename, extension = os.path.splitext(filename)\n\n if extension not in LAYER_TYPES:\n msg = ('Invalid file extension in file %s. Valid extensions are '\n '%s' % (filename, str(LAYER_TYPES)))\n raise RisikoException(msg)\n\n # Use file name to derive title if not specified\n if title is None or title == '':\n title = os.path.split(basename)[-1]\n\n # Try to find a file with a .keywords extension\n # and create a keywords list from there.\n # It is assumed that the keywords are separated\n # by new lines.\n # Empty keyword lines are ignored (as this causes issues downstream)\n keyword_list = []\n keyword_file = basename + '.keywords'\n if os.path.exists(keyword_file):\n f = open(keyword_file, 'r')\n for line in f.readlines():\n\n # Ignore blank lines\n raw_keyword = line.strip()\n if raw_keyword == '':\n continue\n\n # Strip any spaces after or before the colons if present\n if ':' in raw_keyword:\n keyword = ':'.join([x.strip() for x in raw_keyword.split(':')])\n\n # FIXME (Ole): Replace spaces by underscores and store keyword.\n # See issue #148\n keyword_list.append(keyword.replace(' ', '_'))\n f.close()\n\n # Take care of file types\n if extension == '.asc':\n # We assume this is an AAIGrid ASCII file such as those generated by\n # ESRI and convert it to Geotiff before uploading.\n\n # Create temporary tif file for upload and check that the road is clear\n prefix = os.path.split(basename)[-1]\n upload_filename = unique_filename(prefix=prefix, suffix='.tif')\n upload_basename, extension = os.path.splitext(upload_filename)\n\n # Copy any metadata files to unique filename\n for ext in ['.sld', '.keywords']:\n if os.path.exists(basename + ext):\n cmd = 'cp %s%s %s%s' % (basename, ext, upload_basename, ext)\n run(cmd)\n\n # Check that projection file exists\n prjname = basename + '.prj'\n if not os.path.isfile(prjname):\n msg = ('File %s must have a projection file named '\n '%s' % (filename, prjname))\n raise RisikoException(msg)\n\n # Convert ASCII file to GeoTIFF\n R = read_layer(filename)\n R.write_to_file(upload_filename)\n else:\n # The specified file is the one to upload\n upload_filename = filename\n\n # Attempt to upload the layer\n try:\n # Upload\n layer = file_upload(upload_filename,\n user=user,\n title=title,\n keywords=keyword_list,\n 
overwrite=overwrite)\n\n # FIXME (Ole): This workaround should be revisited.\n # This fx means that keywords can't have spaces\n # Really need a generic way of getting this kind of\n # info in and out of GeoNode\n layer.keywords = ' '.join(keyword_list)\n layer.save()\n except GeoNodeException, e:\n # Layer did not upload. Convert GeoNodeException to RisikoException\n raise RisikoException(e)\n else:\n logmsg = ('Uploaded \"%s\" with name \"%s\".'\n % (basename, layer.name))\n if not check_metadata:\n logmsg += ' Did not explicitly verify metadata.'\n logger.info(logmsg)\n return layer\n else:\n # Check metadata and return layer object\n logmsg += ' Metadata veried.'\n ok = False\n for i in range(4):\n try:\n check_layer(layer)\n except Exception, errmsg:\n logger.info('Metadata for layer %s not yet ready - '\n 'trying again. Error message was: %s'\n % (layer.name, errmsg))\n time.sleep(0.3)\n else:\n ok = True\n break\n if ok:\n logger.info(logmsg)\n return layer\n else:\n msg = ('Could not confirm that layer %s was uploaded '\n 'correctly: %s' % (layer, errmsg))\n raise Exception(msg)\n finally:\n # Clean up generated tif files in either case\n if extension == '.asc':\n os.remove(upload_filename)\n os.remove(upload_filename + '.aux.xml')\n\n\ndef save_directory_to_geonode(directory,\n user=None,\n title=None,\n overwrite=True,\n check_metadata=True,\n ignore=None):\n \"\"\"Upload a directory of spatial data files to GeoNode\n\n Input\n directory: Valid root directory for layer files\n user: Django User object\n overwrite: Boolean variable controlling whether existing layers\n can be overwritten by this operation. Default is True\n check_metadata: See save_file_to_geonode\n ignore: None or list of filenames to ignore\n Output\n list of layer objects\n \"\"\"\n\n if ignore is None:\n ignore = []\n\n msg = ('Argument %s to save_directory_to_geonode is not a valid directory.'\n % directory)\n assert os.path.isdir(directory), msg\n\n layers = []\n for root, _, files in os.walk(directory):\n for short_filename in files:\n if short_filename in ignore:\n continue\n\n _, extension = os.path.splitext(short_filename)\n filename = os.path.join(root, short_filename)\n\n # Attempt upload only if extension is recognised\n if extension in LAYER_TYPES:\n try:\n layer = save_to_geonode(filename,\n user=user,\n title=title,\n overwrite=overwrite,\n check_metadata=check_metadata)\n\n except Exception, e:\n msg = ('Filename \"%s\" could not be uploaded. '\n 'Error was: %s' % (filename, str(e)))\n raise RisikoException(msg)\n else:\n layers.append(layer)\n\n # Return layers that successfully uploaded\n return layers\n\n\ndef save_to_geonode(incoming, user=None, title=None,\n overwrite=True, check_metadata=True,\n ignore=None):\n \"\"\"Save a files to local Risiko GeoNode\n\n Input\n incoming: Either layer file or directory\n user: Django User object\n title: If specified, it will be applied to all files. If None or ''\n filenames will be used to infer titles.\n overwrite: Boolean variable controlling whether existing layers\n can be overwritten by this operation. Default is True\n check_metadata: See save_file_to_geonode\n ignore: None or list of filenames to ignore\n\n FIXME (Ole): WxS contents does not reflect the renaming done\n when overwrite is False. This should be reported to\n the geonode-dev mailing list\n\n Output\n layer object or list of layer objects\n \"\"\"\n\n msg = ('First argument to save_to_geonode must be a string. 
'\n 'I got %s' % incoming)\n assert isinstance(incoming, basestring), msg\n\n if os.path.isdir(incoming):\n # Upload all valid layer files in this dir recursively\n layers = save_directory_to_geonode(incoming, title=title, user=user,\n overwrite=overwrite,\n check_metadata=check_metadata,\n ignore=ignore)\n return layers\n elif os.path.isfile(incoming):\n # Upload single file (using its name as title)\n layer = save_file_to_geonode(incoming, title=title, user=user,\n overwrite=overwrite,\n check_metadata=check_metadata,\n ignore=ignore)\n return layer\n else:\n msg = 'Argument %s was neither a file or a directory' % incoming\n raise RisikoException(msg)\n" }, { "alpha_fraction": 0.6694915294647217, "alphanum_fraction": 0.6694915294647217, "avg_line_length": 28.5, "blob_id": "b27ab4291a69aabbedfb8e5b68276ad593e9a639", "content_id": "9ee7059a19c0cff126c8eec46959efbf060bf543", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 472, "license_type": "no_license", "max_line_length": 87, "num_lines": 16, "path": "/docs/development/dev_help.rst", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "============\nGetting help\n============\n\n* Report bugs with RiaB in our `ticket tracker`_.\n\n* `archives of the risiko-dev mailing list`_.\n\n* `post a question`_.\n\n* _#riab IRC channel (irc://irc.freenode.net/riab).\n\n.. _archives of the risiko-dev mailing list: http://groups.google.com/group/risiko-dev/\n.. _post a question: http://groups.google.com/group/risiko-dev\n.. _#riab IRC channel: irc://irc.freenode.net/riab\n.. _ticket tracker: http://github.com/AIFDR/riab/issues\n" }, { "alpha_fraction": 0.8148148059844971, "alphanum_fraction": 0.8148148059844971, "avg_line_length": 53, "blob_id": "214fbd09525b8761aad49ae4d39c5977130a2525", "content_id": "f446797c38c65d2a12b992d041f1d35381a7e849", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 54, "license_type": "no_license", "max_line_length": 53, "num_lines": 1, "path": "/impact/tests/plugins/__init__.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "# This package contains plugins used for testing only\n" }, { "alpha_fraction": 0.5513905882835388, "alphanum_fraction": 0.5574365258216858, "avg_line_length": 19.936708450317383, "blob_id": "f52fd9f859bc388c8798ef948d5702e28ba7bc9c", "content_id": "210879d1c2d3b8893216e5355a0055d494e53ab8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1654, "license_type": "no_license", "max_line_length": 73, "num_lines": 79, "path": "/impact/plugins/utilities.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "\"\"\"Module to create damage curves from point data\n\"\"\"\n\nimport numpy\nfrom scipy.interpolate import interp1d\n\n\nclass Damage_curve:\n \"\"\"Class for implementation of damage curves based on point data\n \"\"\"\n\n def __init__(self, data):\n\n try:\n data = numpy.array(data)\n except:\n msg = 'Could not convert data %s to damage curve' % str(data)\n raise Exception(msg)\n\n msg = 'Damage curve data must be a 2d array or a list of lists'\n assert len(data.shape) == 2, msg\n\n msg = 'Damage curve data must have two columns'\n assert data.shape[1] == 2, msg\n\n x = data[:, 0]\n y = data[:, 1]\n\n self.curve = interp1d(x, y)\n\n def __call__(self, x):\n return self.curve(x)\n\n\nclass ColorMapEntry:\n \"\"\"Representation of color map entry in SLD file\n\n Input\n color\n quantity\n 
opacity (default '0')\n \"\"\"\n\n def __init__(self, color, quantity, opacity=None):\n self.color = color\n self.opacity = opacity\n self.quantity = quantity\n\n\nclass PointSymbol:\n \"\"\"\n \"\"\"\n\n def __init__(self, value, icon):\n self.value = value\n self.icon = icon\n\n\nclass PointClassColor:\n \"\"\"\n \"\"\"\n\n def __init__(self, name, clmin, clmax, fill_color,\n stroke_color=None, opacity=1):\n self.name = name\n self.clmin = clmin\n self.clmax = clmax\n self.fill_color = fill_color\n self.stroke_color = stroke_color\n self.opacity = opacity\n\n\nclass PointZoomSize:\n \"\"\"\n \"\"\"\n\n def __init__(self, level, size):\n self.level = level\n self.size = size\n" }, { "alpha_fraction": 0.5108378529548645, "alphanum_fraction": 0.5251843333244324, "avg_line_length": 39.957115173339844, "blob_id": "f76fe16238cf140a6f822a5bbb9949828de6da28", "content_id": "14c6c465f093d02968a6e2e62ef877dea67d0645", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 44889, "license_type": "no_license", "max_line_length": 79, "num_lines": 1096, "path": "/impact/tests/test_geonode_operations.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "from geonode.maps.utils import upload, GeoNodeException\nfrom geonode.maps.models import Layer\nfrom impact.storage.utilities import unique_filename, LAYER_TYPES\nfrom impact.storage.io import get_bounding_box\nfrom impact.storage.io import download, get_metadata\nfrom django.conf import settings\nimport os\nimport time\nimport unittest\nimport numpy\nimport urllib2\nfrom geonode.maps.utils import get_valid_user\nfrom impact.storage.io import save_to_geonode, RisikoException\nfrom impact.storage.io import check_layer, assert_bounding_box_matches\nfrom impact.storage.io import get_bounding_box_string\nfrom impact.storage.io import bboxstring2list\nfrom impact.storage.utilities import nanallclose\nfrom impact.tests.utilities import TESTDATA, INTERNAL_SERVER_URL\nfrom impact.tests.utilities import get_web_page\nfrom impact.storage.io import read_layer\n\n#---Jeff\nfrom owslib.wcs import WebCoverageService\nimport tempfile\n\n\n# FIXME: Can go when OWSLib patch comes on line\ndef ns(tag):\n return '{http://www.opengis.net/wcs}' + tag\n#---\n\n\nclass Test_geonode_connection(unittest.TestCase):\n \"\"\"Tests file uploads, metadata etc\n \"\"\"\n\n def setUp(self):\n \"\"\"Create valid superuser\n \"\"\"\n self.user = get_valid_user()\n\n def tearDown(self):\n pass\n\n def test_layer_upload(self):\n \"\"\"Layers can be uploaded to local GeoNode\n \"\"\"\n\n expected_layers = []\n not_expected_layers = []\n datadir = TESTDATA\n BAD_LAYERS = ['grid_without_projection.asc',\n 'kecamatan_prj.shp'] # FIXME(Ole): This layer is not\n # 'BAD', just in a different\n # projection (TM3_Zone_48-2) so\n # serves as another test for\n # issue #40\n for root, dirs, files in os.walk(datadir):\n for filename in files:\n basename, extension = os.path.splitext(filename)\n\n if extension.lower() in LAYER_TYPES:\n # FIXME(Ole): GeoNode converts names to lower case\n name = unicode(basename.lower())\n if filename in BAD_LAYERS:\n not_expected_layers.append(name)\n else:\n expected_layers.append(name)\n\n # Upload\n layers = save_to_geonode(datadir, user=self.user, overwrite=True,\n ignore=BAD_LAYERS)\n\n # Check integrity\n layer_names = [l.name for l in layers]\n for layer in layers:\n msg = 'Layer %s was uploaded but not expected' % layer.name\n assert layer.name in expected_layers, msg\n\n # Uncomment to reproduce 
issue #102\n # This may still also reproduce issue #40 for layer\n # tsunami_max_inundation_depth_bb_utm\n #check_layer(layer, full=True)\n\n for layer_name in expected_layers:\n msg = ('The following layer should have been uploaded '\n 'but was not: %s' % layer_name)\n assert layer_name in layer_names, msg\n\n # Check the layer is in the Django database\n Layer.objects.get(name=layer_name)\n\n # Check that layer is in geoserver\n found = False\n gs_username, gs_password = settings.GEOSERVER_CREDENTIALS\n page = get_web_page(os.path.join(settings.GEOSERVER_BASE_URL,\n 'rest/layers'),\n username=gs_username,\n password=gs_password)\n for line in page:\n if line.find('rest/layers/%s.html' % layer_name) > 0:\n found = True\n if not found:\n msg = ('Upload could not be verified, the layer %s is not '\n 'in geoserver %s, but GeoNode did not raise any errors, '\n 'this should never happen.' %\n (layer_name, settings.GEOSERVER_BASE_URL))\n raise GeoNodeException(msg)\n\n server_url = settings.GEOSERVER_BASE_URL + 'ows?'\n\n # Verify that the GeoServer GetCapabilities record is accessible:\n metadata = get_metadata(server_url)\n msg = ('The metadata list should not be empty in server %s'\n % server_url)\n assert len(metadata) > 0, msg\n\n # FIXME(Ole): Check the keywords are recognized too\n\n def test_raster_wcs_reprojection(self):\n \"\"\"UTM Raster can be reprojected by Geoserver and downloaded correctly\n \"\"\"\n # FIXME (Ole): Jeff needs to do this with assertions (ticket #40)\n\n filename = 'tsunami_max_inundation_depth_BB_utm.asc'\n projected_tif_file = os.path.join(TESTDATA, filename)\n\n #projected_tif = file_upload(projected_tif_file, overwrite=True)\n projected_tif = save_to_geonode(projected_tif_file,\n user=self.user,\n overwrite=True)\n check_layer(projected_tif)\n\n wcs_url = settings.GEOSERVER_BASE_URL + 'wcs'\n wcs = WebCoverageService(wcs_url, version='1.0.0')\n #logger.info(wcs.contents)\n metadata = wcs.contents[projected_tif.typename]\n #logger.info(metadata.grid)\n bboxWGS84 = metadata.boundingBoxWGS84\n #logger.info(bboxWGS84)\n resx = metadata.grid.offsetvectors[0][0]\n resy = abs(float(metadata.grid.offsetvectors[1][1]))\n #logger.info(\"resx=%s resy=%s\" % (str(resx), str(resy)))\n formats = metadata.supportedFormats\n #logger.info(formats)\n supportedCRS = metadata.supportedCRS\n #logger.info(supportedCRS)\n width = metadata.grid.highlimits[0]\n height = metadata.grid.highlimits[1]\n #logger.info(\"width=%s height=%s\" % (width, height))\n gs_cat = Layer.objects.gs_catalog\n cvg_store = gs_cat.get_store(projected_tif.name)\n cvg_layer = gs_cat.get_resource(projected_tif.name, store=cvg_store)\n #logger.info(cvg_layer.request_srs_list)\n #logger.info(cvg_layer.response_srs_list)\n\n # FIXME: A patch was submitted OWSlib 20110808\n # Can delete the following once patch appears\n # In the future get bboxNative and nativeSRS from get_metadata\n descCov = metadata._service.getDescribeCoverage(projected_tif.typename)\n envelope = (descCov.find(ns('CoverageOffering/') + ns('domainSet/') +\n ns('spatialDomain/') +\n '{http://www.opengis.net/gml}Envelope'))\n nativeSrs = envelope.attrib['srsName']\n #logger.info(nativeSrs)\n gmlpositions = envelope.findall('{http://www.opengis.net/gml}pos')\n lc = gmlpositions[0].text\n uc = gmlpositions[1].text\n bboxNative = (float(lc.split()[0]), float(lc.split()[1]),\n float(uc.split()[0]), float(uc.split()[1]))\n #logger.info(bboxNative)\n # ---- END PATCH\n\n # Make a temp dir to store the saved files\n tempdir = '/tmp/%s' % 
str(time.time())\n os.mkdir(tempdir)\n\n # Check that the layer can be downloaded in its native projection\n cvg = wcs.getCoverage(identifier=projected_tif.typename,\n format='GeoTIFF',\n crs=nativeSrs,\n bbox=bboxNative,\n resx=resx,\n resy=resy)\n\n t = tempfile.NamedTemporaryFile(delete=False,\n dir=tempdir)\n\n out = open(t.name, 'wb')\n out.write(cvg.read())\n out.close()\n #logger.info(\"GeoTIFF in %s = %s\" % (nativeSrs, t.name))\n # TODO: Verify that the file is a valid GeoTiff and that it is\n # _exactly_ the same size and bbox of the original\n\n # Test that the layer can be downloaded in ARCGRID format\n cvg_layer.supported_formats = cvg_layer.supported_formats + ['ARCGRID']\n gs_cat.save(cvg_layer)\n cvg = wcs.getCoverage(identifier=projected_tif.typename,\n format='ARCGRID',\n crs=nativeSrs,\n bbox=bboxNative,\n resx=resx,\n resy=resy)\n\n t = tempfile.NamedTemporaryFile(delete=False,\n dir=tempdir)\n\n out = open(t.name, 'wb')\n out.write(cvg.read())\n out.close()\n #logger.info(\"ARCGRID in %s = %s\" % (nativeSrs, t.name))\n # Check that the downloaded file is a valid ARCGRID file and that it\n # the required projection information\n # (FIXME: There is no prj file here. GS bug)\n\n # Check that the layer can downloaded in WGS84\n cvg_layer.request_srs_list += ['EPSG:4326']\n cvg_layer.response_srs_list += ['EPSG:4326']\n gs_cat.save(cvg_layer)\n #logger.info(cvg_layer.request_srs_list)\n #logger.info(cvg_layer.response_srs_list)\n cvg = wcs.getCoverage(identifier=projected_tif.typename,\n format='GeoTIFF',\n crs='EPSG:4326',\n bbox=bboxWGS84,\n #resx=0.000202220898116, # Should NOT be hard-coded!\n # How do we convert\n #resy=0.000202220898116) # See comments in riab issue #103\n width=width,\n height=height)\n\n t = tempfile.NamedTemporaryFile(delete=False,\n dir=tempdir)\n\n out = open(t.name, 'wb')\n out.write(cvg.read())\n out.close()\n #logger.info(\"GeoTIFF in %s = %s\" % (\"EPSG:4326\", t.name))\n # TODO: Verify that the file is a valid GeoTiff and that it is\n # the correct size and bbox based on the resx and resy or width\n # and height specified\n\n # Check that we can download the layer in another projection\n cvg_layer.request_srs_list += ['EPSG:32356']\n cvg_layer.response_srs_list += ['EPSG:32356']\n cvg_layer.request_srs_list += ['EPSG:900913']\n cvg_layer.response_srs_list += ['EPSG:900913']\n gs_cat.save(cvg_layer)\n #logger.info(cvg_layer.request_srs_list)\n #logger.info(cvg_layer.response_srs_list)\n # How do we get the bboxes for the newly assigned\n # request/response SRS??\n\n cvg = wcs.getCoverage(identifier=projected_tif.typename,\n format='GeoTIFF',\n crs='EPSG:32356', # Should not be hardcoded for a test,\n # or should use 900913 (need bbox)\n bbox=bboxNative,\n #resx=0.000202220898116, # Should NOT be hard-coded!\n # How do we convert\n #resy=0.000202220898116) # See comments in riab issue #103\n width=width,\n height=height)\n\n t = tempfile.NamedTemporaryFile(delete=False,\n dir=tempdir)\n\n out = open(t.name, 'wb')\n out.write(cvg.read())\n out.close()\n #logger.info(\"GeoTIFF in %s = %s\" % (\"EPSG:32356\", t.name))\n # TODO: Verify that the file is a valid GeoTiff and that it is\n # the correct size and bbox based on the resx and resy or width\n # and height specified\n\n # Clean up and completely delete the layer\n #projected_tif.delete()\n\n def test_extension_not_implemented(self):\n \"\"\"RisikoException is returned for not compatible extensions\n \"\"\"\n sampletxt = os.path.join(TESTDATA,\n 'lembang_schools_percentage_loss.dbf')\n try:\n 
save_to_geonode(sampletxt, user=self.user)\n except RisikoException, e:\n pass\n else:\n msg = ('Expected an exception for invalid .dbf type')\n raise Exception(msg)\n\n def test_shapefile(self):\n \"\"\"Shapefile can be uploaded\n \"\"\"\n thefile = os.path.join(TESTDATA, 'lembang_schools.shp')\n layer = save_to_geonode(thefile, user=self.user, overwrite=True)\n check_layer(layer, full=True)\n\n assert isinstance(layer.geographic_bounding_box, basestring)\n\n def test_shapefile_without_prj(self):\n \"\"\"Shapefile with without prj file is rejected\n \"\"\"\n\n thefile = os.path.join(TESTDATA,\n 'lembang_schools_percentage_loss.shp')\n try:\n uploaded = save_to_geonode(thefile, user=self.user)\n except RisikoException, e:\n pass\n except Exception, e:\n msg = ('Was expecting a %s, got %s instead.' %\n (RisikoException, type(e)))\n assert e is RisikoException, msg\n\n def test_asciifile_without_prj(self):\n \"\"\"ASCII file with without prj file is rejected\n \"\"\"\n\n thefile = os.path.join(TESTDATA,\n 'grid_without_projection.asc')\n\n try:\n uploaded = save_to_geonode(thefile, user=self.user)\n except RisikoException, e:\n pass\n except Exception, e:\n msg = ('Was expecting a %s, got %s instead.' %\n (RisikoException, type(e)))\n assert e is RisikoException, msg\n\n def test_tiff(self):\n \"\"\"GeoTIF file can be uploaded\n \"\"\"\n thefile = os.path.join(TESTDATA, 'Population_2010_clip.tif')\n uploaded = save_to_geonode(thefile, user=self.user, overwrite=True)\n check_layer(uploaded, full=True)\n\n def test_asc(self):\n \"\"\"ASCII file can be uploaded\n \"\"\"\n thefile = os.path.join(TESTDATA, 'test_grid.asc')\n uploaded = save_to_geonode(thefile, user=self.user, overwrite=True)\n check_layer(uploaded, full=True)\n\n def test_another_asc(self):\n \"\"\"Real world ASCII file can be uploaded\n \"\"\"\n thefile = os.path.join(TESTDATA, 'lembang_mmi_hazmap.asc')\n layer = save_to_geonode(thefile, user=self.user, overwrite=True)\n check_layer(layer, full=True)\n\n # Verify metadata\n layer_name = '%s:%s' % (layer.workspace, layer.name)\n metadata = get_metadata(INTERNAL_SERVER_URL,\n layer_name)\n assert 'id' in metadata\n assert 'title' in metadata\n assert 'layer_type' in metadata\n assert 'keywords' in metadata\n assert 'bounding_box' in metadata\n assert 'geotransform' in metadata\n assert len(metadata['bounding_box']) == 4\n\n # A little metadata characterisation test\n # FIXME (Ole): Get this right when new resolution keyword\n # has been fully sorted out. There are 3 other tests failing at\n # the moment\n ref = {'layer_type': 'raster',\n 'keywords': {'category': 'hazard',\n 'subcategory': 'earthquake',\n 'resolution': '0.0112'},\n 'geotransform': (105.29857, 0.0112, 0.0,\n -5.565233000000001, 0.0, -0.0112),\n 'resolution': 0.0112,\n 'title': 'lembang_mmi_hazmap'}\n\n for key in ['layer_type', 'keywords', 'geotransform',\n 'title']:\n\n if key == 'keywords':\n kwds = metadata[key]\n for k in kwds:\n assert kwds[k] == ref[key][k]\n else:\n msg = ('Expected metadata for key %s to be %s. 
'\n 'Instead got %s' % (key, ref[key], metadata[key]))\n if key in ['geotransform', 'resolution']:\n assert numpy.allclose(metadata[key], ref[key]), msg\n else:\n assert metadata[key] == ref[key], msg\n\n def test_repeated_upload(self):\n \"\"\"The same file can be uploaded more than once\n \"\"\"\n thefile = os.path.join(TESTDATA, 'test_grid.asc')\n uploaded1 = save_to_geonode(thefile, overwrite=True,\n user=self.user)\n check_layer(uploaded1, full=True)\n uploaded2 = save_to_geonode(thefile, overwrite=True,\n user=self.user)\n check_layer(uploaded2, full=True)\n uploaded3 = save_to_geonode(thefile, overwrite=False,\n user=self.user)\n check_layer(uploaded3, full=True)\n\n msg = ('Expected %s but got %s' % (uploaded1.name, uploaded2.name))\n assert uploaded1.name == uploaded2.name, msg\n\n msg = ('Expected a different name when uploading %s using '\n 'overwrite=False but got %s' % (thefile, uploaded3.name))\n assert uploaded1.name != uploaded3.name, msg\n\n def test_layer_name_validation(self):\n \"\"\"Exception is raised when get_valid_layer_name is given a time object\n \"\"\"\n from geonode.maps.utils import get_valid_layer_name\n import datetime\n try:\n get_valid_layer_name(datetime.datetime.now())\n except GeoNodeException, e:\n pass\n else:\n msg = ('Get_valid_layer_name accepted a time'\n ' object and did not complain')\n assert False, msg\n\n def test_non_existing_file(self):\n \"\"\"RisikoException is returned for non existing file\n \"\"\"\n sampletxt = os.path.join(TESTDATA, 'smoothoperator.shp')\n try:\n save_to_geonode(sampletxt, user=self.user)\n except RisikoException, e:\n pass\n else:\n msg = ('Expected an exception for non existing file')\n assert False, msg\n\n def test_non_existing_dir(self):\n \"\"\"RisikoException is returned for non existing dir\n \"\"\"\n sampletxt = os.path.join(TESTDATA, 'smoothoperator')\n try:\n uploaded_layers = save_to_geonode(sampletxt, user=self.user)\n for uploaded in uploaded_layers:\n print uploaded\n except RisikoException, e:\n pass\n else:\n msg = ('Expected an exception for non existing dir')\n assert False, msg\n\n def test_cleanup(self):\n \"\"\"Cleanup functions in the utils module work\n \"\"\"\n from geonode.maps.utils import cleanup\n\n thefile = os.path.join(TESTDATA, 'lembang_mmi_hazmap.asc')\n uploaded = save_to_geonode(thefile, user=self.user, overwrite=True)\n check_layer(uploaded, full=True)\n\n name = uploaded.name\n uuid = uploaded.uuid\n pk = uploaded.pk\n\n # try calling the cleanup function when the django record exists:\n try:\n cleanup(name, uuid)\n except GeoNodeException, e:\n pass\n else:\n msg = ('Cleaup should raise an exception if the layer [%s]'\n ' exists in the django db' % name)\n assert False, msg\n\n # Manually delete the layer object with SQL\n from django.db import connection, transaction\n cursor = connection.cursor()\n cursor.execute('DELETE FROM maps_layer WHERE id = %d' % pk)\n transaction.commit_unless_managed()\n\n # After this, the records should not live in GeoServer or Geonetwork\n cleanup(name, uuid)\n\n #FIXME: Verify the record does not exist in GS or GN\n\n def test_keywords(self):\n \"\"\"Keywords are read correctly from the .keywords file\n \"\"\"\n\n for filename in ['Earthquake_Ground_Shaking.asc',\n 'Lembang_Earthquake_Scenario.asc',\n 'Padang_WGS84.shp']:\n\n _, ext = os.path.splitext(filename)\n thefile = os.path.join(TESTDATA, filename)\n uploaded = save_to_geonode(thefile, user=self.user, overwrite=True)\n\n # Get uploaded keywords from uploaded layer object\n uploaded_keywords 
= uploaded.keywords\n msg = 'No keywords found in layer %s' % uploaded.name\n assert len(uploaded_keywords) > 0, msg\n\n # Get reference keywords from file\n keywords_file = thefile.replace(ext, '.keywords')\n f = open(keywords_file, 'r')\n keywords_list = []\n for line in f.readlines():\n keywords_list.append(line.strip().replace(' ', ''))\n f.close()\n\n # Verify that every keyword from file has been uploaded\n for keyword in keywords_list:\n msg = 'Could not find keyword \"%s\" in %s' % (keyword,\n uploaded_keywords)\n assert keyword in uploaded_keywords, msg\n\n def test_metadata_twice(self):\n \"\"\"Layer metadata can be correctly uploaded multiple times\n \"\"\"\n\n # This test reproduces ticket #99 by creating new data,\n # uploading twice and verifying metadata\n\n # Base test data\n filenames = ['Lembang_Earthquake_Scenario.asc',\n 'lembang_schools.shp']\n\n for org_filename in filenames:\n org_basename, ext = os.path.splitext(os.path.join(TESTDATA,\n org_filename))\n\n # Copy data to temporary unique name\n basename = unique_filename(dir='/tmp')\n\n cmd = '/bin/cp %s.keywords %s.keywords' % (org_basename, basename)\n os.system(cmd)\n\n cmd = '/bin/cp %s.prj %s.prj' % (org_basename, basename)\n os.system(cmd)\n\n if ext == '.asc':\n layer_type = 'raster'\n filename = '%s.asc' % basename\n cmd = '/bin/cp %s.asc %s' % (org_basename, filename)\n os.system(cmd)\n elif ext == '.shp':\n layer_type = 'vector'\n filename = '%s.shp' % basename\n for e in ['shp', 'shx', 'sbx', 'sbn', 'dbf']:\n cmd = '/bin/cp %s.%s %s.%s' % (org_basename, e,\n basename, e)\n os.system(cmd)\n else:\n msg = ('Unknown layer extension in %s. '\n 'Expected .shp or .asc' % filename)\n raise Exception(msg)\n\n # Repeat multiple times\n for i in range(3):\n\n # Upload\n layer = save_to_geonode(filename, user=self.user,\n overwrite=True)\n\n # Get metadata\n layer_name = '%s:%s' % (layer.workspace, layer.name)\n metadata = get_metadata(INTERNAL_SERVER_URL,\n layer_name)\n\n # Verify\n assert 'id' in metadata\n assert 'title' in metadata\n assert 'layer_type' in metadata\n assert 'keywords' in metadata\n assert 'bounding_box' in metadata\n assert len(metadata['bounding_box']) == 4\n\n # Check integrity between Django layer and file\n assert_bounding_box_matches(layer, filename)\n\n # Check integrity between file and OWS metadata\n ref_bbox = get_bounding_box(filename)\n msg = ('Bounding box from OWS did not match bounding box '\n 'from file. 
They are\\n'\n 'From file %s: %s\\n'\n 'From OWS: %s' % (filename,\n ref_bbox,\n metadata['bounding_box']))\n\n assert numpy.allclose(metadata['bounding_box'],\n ref_bbox), msg\n assert layer.name == metadata['title']\n assert layer_name == metadata['id']\n assert layer_type == metadata['layer_type']\n\n # Check keywords\n if layer_type == 'raster':\n category = 'hazard'\n subcategory = 'earthquake'\n elif layer_type == 'vector':\n category = 'exposure'\n subcategory = 'building'\n else:\n msg = 'Unknown layer type %s' % layer_type\n raise Exception(msg)\n\n keywords = metadata['keywords']\n\n msg = 'Did not find key \"category\" in keywords: %s' % keywords\n assert 'category' in keywords, msg\n\n msg = ('Did not find key \"subcategory\" in keywords: %s'\n % keywords)\n assert 'subcategory' in keywords, msg\n\n msg = ('Category keyword %s did not match expected %s'\n % (keywords['category'], category))\n assert category == keywords['category'], msg\n\n msg = ('Subcategory keyword %s did not match expected %s'\n % (keywords['subcategory'], category))\n assert subcategory == keywords['subcategory'], msg\n\n def test_metadata(self):\n \"\"\"Metadata is retrieved correctly for both raster and vector data\n \"\"\"\n\n # Upload test data\n filenames = ['Lembang_Earthquake_Scenario.asc',\n 'Earthquake_Ground_Shaking.asc',\n 'lembang_schools.shp',\n 'Padang_WGS84.shp']\n layers = []\n paths = []\n for filename in filenames:\n basename, ext = os.path.splitext(filename)\n\n path = os.path.join(TESTDATA, filename)\n layer = save_to_geonode(path, user=self.user, overwrite=True)\n\n # Record layer and file\n layers.append(layer)\n paths.append(path)\n\n # Check integrity\n for i, layer in enumerate(layers):\n\n if filenames[i].endswith('.shp'):\n layer_type = 'vector'\n elif filenames[i].endswith('.asc'):\n layer_type = 'raster'\n else:\n msg = ('Unknown layer extension in %s. '\n 'Expected .shp or .asc' % filenames[i])\n raise Exception(msg)\n\n layer_name = '%s:%s' % (layer.workspace, layer.name)\n metadata = get_metadata(INTERNAL_SERVER_URL,\n layer_name)\n\n assert 'id' in metadata\n assert 'title' in metadata\n assert 'layer_type' in metadata\n assert 'keywords' in metadata\n assert 'bounding_box' in metadata\n assert len(metadata['bounding_box']) == 4\n\n # Check integrity between Django layer and file\n assert_bounding_box_matches(layer, paths[i])\n\n # Check integrity between file and OWS metadata\n ref_bbox = get_bounding_box(paths[i])\n msg = ('Bounding box from OWS did not match bounding box '\n 'from file. 
They are\\n'\n 'From file %s: %s\\n'\n 'From OWS: %s' % (paths[i],\n ref_bbox,\n metadata['bounding_box']))\n\n assert numpy.allclose(metadata['bounding_box'],\n ref_bbox), msg\n assert layer.name == metadata['title']\n assert layer_name == metadata['id']\n assert layer_type == metadata['layer_type']\n\n # Check keywords\n if layer_type == 'raster':\n category = 'hazard'\n subcategory = 'earthquake'\n elif layer_type == 'vector':\n category = 'exposure'\n subcategory = 'building'\n else:\n msg = 'Unknown layer type %s' % layer_type\n raise Exception(msg)\n\n keywords = metadata['keywords']\n\n msg = 'Did not find key \"category\" in keywords: %s' % keywords\n assert 'category' in keywords, msg\n\n msg = 'Did not find key \"subcategory\" in keywords: %s' % keywords\n assert 'subcategory' in keywords, msg\n\n msg = ('Category keyword %s did not match expected %s'\n % (keywords['category'], category))\n assert category == keywords['category'], msg\n\n msg = ('Subcategory keyword %s did not match expected %s'\n % (keywords['subcategory'], category))\n assert subcategory == keywords['subcategory'], msg\n\n def test_native_raster_resolution(self):\n \"\"\"Raster layer retains native resolution through Geoserver\n\n Raster layer can be uploaded and downloaded again with\n native resolution. This is one test for ticket #103\n \"\"\"\n\n hazard_filename = ('%s/maumere_aos_depth_20m_land_wgs84.asc'\n % TESTDATA)\n\n # Get reference values\n H = read_layer(hazard_filename)\n A_ref = H.get_data(nan=True)\n depth_min_ref, depth_max_ref = H.get_extrema()\n\n # Upload to internal geonode\n hazard_layer = save_to_geonode(hazard_filename, user=self.user)\n hazard_name = '%s:%s' % (hazard_layer.workspace, hazard_layer.name)\n\n # Download data again with native resolution\n bbox = get_bounding_box_string(hazard_filename)\n H = download(INTERNAL_SERVER_URL, hazard_name, bbox)\n A = H.get_data(nan=True)\n\n # Compare shapes\n msg = ('Shape of downloaded raster was [%i, %i]. '\n 'Expected [%i, %i].' 
% (A.shape[0], A.shape[1],\n A_ref.shape[0], A_ref.shape[1]))\n assert numpy.allclose(A_ref.shape, A.shape, rtol=0, atol=0), msg\n\n # Compare extrema to values reference values (which have also been\n # verified by QGIS for this layer and tested in test_engine.py)\n depth_min, depth_max = H.get_extrema()\n msg = ('Extrema of downloaded file were [%f, %f] but '\n 'expected [%f, %f]' % (depth_min, depth_max,\n depth_min_ref, depth_max_ref))\n assert numpy.allclose([depth_min, depth_max],\n [depth_min_ref, depth_max_ref],\n rtol=1.0e-6, atol=1.0e-10), msg\n\n # Compare data number by number\n assert nanallclose(A, A_ref, rtol=1.0e-8)\n\n def test_specified_raster_resolution(self):\n \"\"\"Raster layers can be downloaded with specific resolution\n\n This is another test for ticket #103\n\n Native test data:\n\n maumere....asc\n ncols 931\n nrows 463\n cellsize 0.00018\n\n Population_Jakarta\n ncols 638\n nrows 649\n cellsize 0.00045228819716044\n\n Population_2010\n ncols 5525\n nrows 2050\n cellsize 0.0083333333333333\n\n\n Here we download it at a range of fixed resolutions that\n are both coarser and finer, and check that the dimensions\n of the downloaded matrix are as expected.\n\n We also check that the extrema of the subsampled matrix are sane\n \"\"\"\n\n for test_filename in ['maumere_aos_depth_20m_land_wgs84.asc',\n 'Population_Jakarta_geographic.asc',\n 'Population_2010.asc']:\n\n hazard_filename = ('%s/%s' % (TESTDATA, test_filename))\n\n # Get reference values\n H = read_layer(hazard_filename)\n depth_min_ref, depth_max_ref = H.get_extrema()\n native_resolution = H.get_resolution()\n\n # Upload to internal geonode\n hazard_layer = save_to_geonode(hazard_filename, user=self.user)\n hazard_name = '%s:%s' % (hazard_layer.workspace,\n hazard_layer.name)\n\n # Test for a range of resolutions\n for res in [0.02, 0.01, 0.005, 0.002, 0.001, 0.0005, # Coarser\n 0.0002, 0.0001, 0.00006, 0.00003]: # Finer\n\n # To save time don't do finest resolution for the\n # two population sets\n if test_filename.startswith('Population') and res < 0.00006:\n break\n\n # Set bounding box\n bbox = get_bounding_box_string(hazard_filename)\n compare_extrema = True\n if test_filename == 'Population_2010.asc':\n # Make bbox small for finer resolutions to\n # save time and to test that as well.\n # However, extrema obviously won't match those\n # of the full dataset. Once we can clip\n # datasets, we can remove this restriction.\n if res < 0.005:\n bbox = '106.685974,-6.373421,106.974534,-6.079886'\n compare_extrema = False\n bb = bboxstring2list(bbox)\n\n # Download data at specified resolution\n H = download(INTERNAL_SERVER_URL, hazard_name,\n bbox, resolution=res)\n A = H.get_data()\n\n # Verify that data has the requested bobx and resolution\n actual_bbox = H.get_bounding_box()\n msg = ('Bounding box for %s was not as requested. I got %s '\n 'but '\n 'expected %s' % (hazard_name, actual_bbox, bb))\n assert numpy.allclose(actual_bbox, bb, rtol=1.0e-6)\n\n # FIXME (Ole): How do we sensibly resolve the issue with\n # resx, resy vs one resolution (issue #173)\n actual_resolution = H.get_resolution()[0]\n\n # FIXME (Ole): Resolution is often far from the requested\n # see issue #102\n # Here we have to accept up to 5%\n tolerance102 = 5.0e-2\n msg = ('Resolution of %s was not as requested. 
I got %s but '\n 'expected %s' % (hazard_name, actual_resolution, res))\n assert numpy.allclose(actual_resolution, res,\n rtol=tolerance102), msg\n\n # Determine expected shape from bbox (W, S, E, N)\n ref_rows = int(round((bb[3] - bb[1]) / res))\n ref_cols = int(round((bb[2] - bb[0]) / res))\n\n # Compare shapes (generally, this may differ by 1)\n msg = ('Shape of downloaded raster was [%i, %i]. '\n 'Expected [%i, %i].' % (A.shape[0], A.shape[1],\n ref_rows, ref_cols))\n assert (ref_rows == A.shape[0] and\n ref_cols == A.shape[1]), msg\n\n # Assess that the range of the interpolated data is sane\n if not compare_extrema:\n continue\n\n # For these test sets we get exact match of the minimum\n msg = ('Minimum of %s resampled at resolution %f '\n 'was %f. Expected %f.' % (hazard_layer.name,\n res,\n numpy.nanmin(A),\n depth_min_ref))\n assert numpy.allclose(depth_min_ref, numpy.nanmin(A),\n rtol=0.0, atol=0.0), msg\n\n # At the maximum it depends on the subsampling\n msg = ('Maximum of %s resampled at resolution %f '\n 'was %f. Expected %f.' % (hazard_layer.name,\n res,\n numpy.nanmax(A),\n depth_max_ref))\n if res < native_resolution[0]:\n # When subsampling to finer resolutions we expect a\n # close match\n assert numpy.allclose(depth_max_ref, numpy.nanmax(A),\n rtol=1.0e-10, atol=1.0e-8), msg\n elif res < native_resolution[0] * 10:\n # When upsampling to coarser resolutions we expect\n # ballpark match (~20%)\n assert numpy.allclose(depth_max_ref, numpy.nanmax(A),\n rtol=0.17, atol=0.0), msg\n else:\n # Upsampling to very coarse resolutions, just want sanity\n assert 0 < numpy.nanmax(A) <= depth_max_ref\n\n def test_raster_scaling(self):\n \"\"\"Raster layers can be scaled when resampled\n\n This is a test for ticket #168\n\n Native test .asc data has\n\n ncols 5525\n nrows 2050\n cellsize 0.0083333333333333\n\n Scaling is necessary for raster data that represents density\n such as population per km^2\n \"\"\"\n\n for test_filename in ['Population_Jakarta_geographic.asc',\n 'Population_2010.asc']:\n\n raster_filename = ('%s/%s' % (TESTDATA, test_filename))\n\n # Get reference values\n R = read_layer(raster_filename)\n R_min_ref, R_max_ref = R.get_extrema()\n native_resolution = R.get_resolution()\n\n # Upload to internal geonode\n raster_layer = save_to_geonode(raster_filename, user=self.user)\n raster_name = '%s:%s' % (raster_layer.workspace,\n raster_layer.name)\n\n # Test for a range of resolutions\n for res in [0.02, 0.01, 0.005, 0.002, 0.001, 0.0005, # Coarser\n 0.0002]: # Finer\n\n # To save time don't do finest resolution for the\n # large population set\n if test_filename.startswith('Population_2010') and res < 0.005:\n break\n\n bbox = get_bounding_box_string(raster_filename)\n\n R = download(INTERNAL_SERVER_URL, raster_name,\n bbox, resolution=res)\n A_native = R.get_data(scaling=False)\n A_scaled = R.get_data(scaling=True)\n\n sigma = (R.get_resolution()[0] / native_resolution[0]) ** 2\n\n # Compare extrema\n expected_scaled_max = sigma * numpy.nanmax(A_native)\n msg = ('Resampled raster was not rescaled correctly: '\n 'max(A_scaled) was %f but expected %f'\n % (numpy.nanmax(A_scaled), expected_scaled_max))\n\n assert numpy.allclose(expected_scaled_max,\n numpy.nanmax(A_scaled),\n rtol=1.0e-8, atol=1.0e-8), msg\n\n expected_scaled_min = sigma * numpy.nanmin(A_native)\n msg = ('Resampled raster was not rescaled correctly: '\n 'min(A_scaled) was %f but expected %f'\n % (numpy.nanmin(A_scaled), expected_scaled_min))\n assert numpy.allclose(expected_scaled_min,\n 
numpy.nanmin(A_scaled),\n rtol=1.0e-8, atol=1.0e-12), msg\n\n # Compare elementwise\n msg = 'Resampled raster was not rescaled correctly'\n assert nanallclose(A_native * sigma, A_scaled,\n rtol=1.0e-8, atol=1.0e-8), msg\n\n # Check that it also works with manual scaling\n A_manual = R.get_data(scaling=sigma)\n msg = 'Resampled raster was not rescaled correctly'\n assert nanallclose(A_manual, A_scaled,\n rtol=1.0e-8, atol=1.0e-8), msg\n\n # Check that an exception is raised for bad arguments\n try:\n R.get_data(scaling='bad')\n except:\n pass\n else:\n msg = 'String argument should have raised exception'\n raise Exception(msg)\n\n try:\n R.get_data(scaling='(1, 3)')\n except:\n pass\n else:\n msg = 'Tuple argument should have raised exception'\n raise Exception(msg)\n\n # Check None option without existence of density keyword\n A_none = R.get_data(scaling=None)\n msg = 'Data should not have changed'\n assert nanallclose(A_native, A_none,\n rtol=1.0e-12, atol=1.0e-12), msg\n\n # Try with None and density keyword\n R.keywords['density'] = 'true'\n A_none = R.get_data(scaling=None)\n msg = 'Resampled raster was not rescaled correctly'\n assert nanallclose(A_scaled, A_none,\n rtol=1.0e-12, atol=1.0e-12), msg\n\n R.keywords['density'] = 'Yes'\n A_none = R.get_data(scaling=None)\n msg = 'Resampled raster was not rescaled correctly'\n assert nanallclose(A_scaled, A_none,\n rtol=1.0e-12, atol=1.0e-12), msg\n\n R.keywords['density'] = 'False'\n A_none = R.get_data(scaling=None)\n msg = 'Data should not have changed'\n assert nanallclose(A_native, A_none,\n rtol=1.0e-12, atol=1.0e-12), msg\n\n R.keywords['density'] = 'no'\n A_none = R.get_data(scaling=None)\n msg = 'Data should not have changed'\n assert nanallclose(A_native, A_none,\n rtol=1.0e-12, atol=1.0e-12), msg\n\n def test_keywords_download(self):\n \"\"\"Keywords are downloaded from GeoServer along with layer data\n \"\"\"\n\n # Upload test data\n filenames = ['Lembang_Earthquake_Scenario.asc',\n 'Padang_WGS84.shp',\n 'maumere_aos_depth_20m_land_wgs84.asc']\n layers = []\n paths = []\n for filename in filenames:\n basename, ext = os.path.splitext(filename)\n\n path = os.path.join(TESTDATA, filename)\n\n # Upload to GeoNode\n layer = save_to_geonode(path, user=self.user, overwrite=True)\n\n # Record layer and file\n layers.append(layer)\n paths.append(path)\n\n # Check integrity\n for i, layer in enumerate(layers):\n\n # Get reference keyword dictionary from file\n L = read_layer(paths[i])\n ref_keywords = L.get_keywords()\n\n # Get keywords metadata from GeoServer\n layer_name = '%s:%s' % (layer.workspace, layer.name)\n metadata = get_metadata(INTERNAL_SERVER_URL,\n layer_name)\n assert 'keywords' in metadata\n geo_keywords = metadata['keywords']\n msg = ('Uploaded keywords were not as expected: I got %s '\n 'but expected %s' % (geo_keywords, ref_keywords))\n for kw in ref_keywords:\n # Check that all keywords were uploaded\n # It is OK for new automatic keywords to have appeared\n # (e.g. 
resolution) - see issue #171\n assert kw in geo_keywords, msg\n assert ref_keywords[kw] == geo_keywords[kw], msg\n\n # Download data\n bbox = get_bounding_box_string(paths[i])\n H = download(INTERNAL_SERVER_URL, layer_name, bbox)\n\n dwn_keywords = H.get_keywords()\n msg = ('Downloaded keywords were not as expected: I got %s '\n 'but expected %s' % (dwn_keywords, geo_keywords))\n assert geo_keywords == dwn_keywords, msg\n\n # Check that the layer and its .keyword file is there.\n msg = 'Downloaded layer %s was not found' % H.filename\n assert os.path.isfile(H.filename), msg\n\n kw_filename = os.path.splitext(H.filename)[0] + '.keywords'\n msg = 'Downloaded keywords file %s was not found' % kw_filename\n assert os.path.isfile(kw_filename), msg\n\n # Check that keywords are OK when reading downloaded file\n L = read_layer(H.filename)\n read_keywords = L.get_keywords()\n msg = ('Keywords in downloaded file %s were not as expected: '\n 'I got %s but expected %s'\n % (kw_filename, read_keywords, geo_keywords))\n assert read_keywords == geo_keywords, msg\n\n\nif __name__ == '__main__':\n os.environ['DJANGO_SETTINGS_MODULE'] = 'risiko.settings'\n suite = unittest.makeSuite(Test_geonode_connection, 'test')\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite)\n" }, { "alpha_fraction": 0.753742516040802, "alphanum_fraction": 0.757485032081604, "avg_line_length": 31.560976028442383, "blob_id": "c7652cd6365208f9a76049e2ca625a03325fd009", "content_id": "13fef26d153bf716b6cd5d91266a4805070cf860", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1336, "license_type": "no_license", "max_line_length": 207, "num_lines": 41, "path": "/docs/deployment/production_install.rst", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "Deployment\n==========\n\nProduction installation \n-----------------------\n\n(Obsolete!)\n\nThis is for installing Risiko as a public web server.\n\nIt is assumed that a development system is already running (installed as per instructions above) and that the production system is a separate server that can be accessed via ssh (ssh [email protected]).\n\nTo deploy RISIKO in production mode from your development system to the remote server run the following::\n\n risiko-activate\n cd $RIAB_HOME/riab/extras\n fab risiko -H [email protected]\n\nIf something goes wrong, you can check the logs with the command::\n\n fab log -H [email protected]\n\nYou can update an existing production system to the latest revision with the command::\n\n fab pull -H [email protected]\n\n\nThe production deployment procedure is scripted in the file fabfile.py and the fabric framework is documented at http://docs.fabfile.org\n\nLive USB drive\n--------------\n\nInstall RISIKO on a live USB drive.\n\n# This text should probably go into a different file\n\n1. Create Ubuntu live USB drive:\n - http://www.pendrivelinux.com/universal-usb-installer-easy-as-1-2-3 or http://www.linuxliveusb.com/\n - https://wiki.ubuntu.com/LiveUsbPendrivePersistent (paragraph starting with \"To make the persistence larger\")\n\n2. 
Install Risiko normally as detailed in INSTALL.rst\n\n" }, { "alpha_fraction": 0.7356593608856201, "alphanum_fraction": 0.7474867105484009, "avg_line_length": 41.78480911254883, "blob_id": "2eb80d1254f31e97a2c86c6416a0092227cf42b6", "content_id": "847f84718de8e3cc00ab83f7e96e4002bdd3aee5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3382, "license_type": "no_license", "max_line_length": 262, "num_lines": 79, "path": "/docs/intro/basic_install.rst", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "========================\nQuick-Start Installation\n========================\n\nThis is the project: \"Risiko - Risk in a Box\".\nThe latest source code is available in https://github.com/AIFDR/riab/tree/master/impact which contains modules for risk calculations, gis functionality and plugin management.\n\nFor more information about Risk In a Box please look at\nour documentation on http://risiko.readthedocs.org (for the current stable release) or http://risiko_dev.readthedocs.org (for the most recent development version)\n\n.. FIXME: Make ole link http://riab.readthedocs.org obsolete\n\nThese are the instructions for installation of Risiko in development mode (for changing the software) and production mode (for deploying on a server).\n\n\n-------------------\nSystem Requirements\n-------------------\n\n - A standard PC with at least 4GB of RAM.\n - The operating system is a recent version of Ubuntu Linux (http://www.ubuntu.com). Risiko has been tested on versions 10.04, 10.10 and 11.04 (32 and 64 bit).\n - The platform is using the default /etc/sources.list as it comes in a fresh Ubuntu installation. You may want to change this to a local mirror if the internet connection is slow (see e.g. https://help.ubuntu.com/community/Repositories/CommandLine) for details.\n - The user installing and running Risiko has administrator rights (using the sudo)\n\n.. _sec-quick-start:\n\n------------------------\nDevelopment installation\n------------------------\n\nThis is for those who either want to try out the software and/or modify it. For installing Risiko as a public web server please see instructions for production installation.\n\nTo install a RISIKO development environment, start a terminal window, cd to your favorite development area and run the following::\n\n wget http://bit.ly/risiko-install\n bash risiko-install\n\nThis will create a working development installation and provide guidance on how to run the test suite, setup the server and try it.\n\nTo run the test suite, you'll need the commands::\n\n risiko-activate\n risiko-test\n\nTo upload the bundled demo data, you'll need to do the following::\n\n risiko-activate \n risiko-clean \n risiko-start \n risiko-upload risiko_demo_data \n\nwhen this is finished point the browser to 127.0.0.1:8000, select layers and try the risk calculator.\n\n\n\nNote:\nIf you wish to commit changes back to the repository, you must\n 1. Get an account on github.com\n 2. Get commit access to https://github.com/AIFDR/riab\n 3. Setup and register your ssh keys with your account: https://github.com/account/ssh\n\n\n\n\n===========\nLimitations\n===========\n\nRisiko is a very new project. 
The current code development started in earnest in March 2011 and there is still much to be done.\nHowever, we work on the philosophy that stakeholders should have access to the development and source code from the very beginning and invite comments, suggestions and contributions.\n\n\nAs such, Risiko currently has some major limitations, including\n\n * Risiko does not yet run with data loaded locally. Rather it points to a GeoServer with demo data at www.aifdr.org:8080/geoserver\n * Hazard layers must be provided as raster data\n * Exposure data must be either raster data or point vector data\n * All data must be provided in WGS84 geographic coordinates\n * Neither AIFDR nor GFDRR take any responsibility for the correctness of outputs from Risiko or decisions derived as a consequence\n\n\n" }, { "alpha_fraction": 0.610184371471405, "alphanum_fraction": 0.6128182411193848, "avg_line_length": 18.27118682861328, "blob_id": "010e305ee2fbeb6e7bad4f054229d145d55ad617", "content_id": "386f2f55be2f84ede75b35921cbb61ef8a8e3a15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1139, "license_type": "no_license", "max_line_length": 127, "num_lines": 59, "path": "/docs/usage/risiko_calculator.rst", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "=================\nHow to use Risiko\n=================\n\nIntroduction\n------------\n\nRisiko is based on geonode (http://geonode.org/).\n\n.. figure:: /images/main_screen_guide.png\n\t:scale: 100 %\n \n Risiko screenshot of main calculation area\n\n\n\n#. The selected Hazard layer\n#. The selected Exposure layer\n#. The function to use to determine the Impact from the Hazard and Exposure\n#. Button to reset the layers and any calculation\n#. Button to start the calculation\n#. The list of the base map layers \n#. The list of the Hazard and Impact layers\n#. The Legend for the map\n#. The info button - allows the selection of point of calculation impact points (e.g. houses) and shows details for that point.\n#. Language Selection\n#. The main calculation map\n\n\n.. 
Note:: \n\n This page is a placeholder for a more complete user guide.\n\nHazard Layer\n------------\n\nExposure Layer\n--------------\n\nImpact Function\n---------------\n\nInterpreting the Output\n-----------------------\n\nBase Map Layers\n---------------\n\nHazard and Impact Layers\n------------------------\n\nLegend\n------\n\nThe Toolbar\n-----------\n\nLanguage Selection\n------------------\n\n\n" }, { "alpha_fraction": 0.45517241954803467, "alphanum_fraction": 0.45517241954803467, "avg_line_length": 43.61538314819336, "blob_id": "d15ab694d605e6e1a66a68c4b0745b51ce97e2ef", "content_id": "03c43100e9cd2b52eaad2def8d87d3c0c6b777a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 580, "license_type": "no_license", "max_line_length": 76, "num_lines": 13, "path": "/impact/urls.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "from django.conf.urls.defaults import patterns, url\n\nurlpatterns = patterns('',\n url(r'^$',\n 'django.views.generic.simple.direct_to_template',\n {'template': 'impact/calculator.html'},\n name='calculator'))\n\nurlpatterns += patterns('impact.views',\n url(r'^api/calculate/$', 'calculate'),\n url(r'^api/layers/$', 'layers'),\n url(r'^api/functions/$', 'functions'),\n url(r'^api/debug/$', 'debug'))\n" }, { "alpha_fraction": 0.6408839821815491, "alphanum_fraction": 0.6512430906295776, "avg_line_length": 28.855670928955078, "blob_id": "72b424a72e9ee144ba466f34c2420560efe7e281", "content_id": "7b845630037bdd6afeb8ed9697362b3e1886d801", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2896, "license_type": "no_license", "max_line_length": 72, "num_lines": 97, "path": "/impact/tests/test_plugin_core.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "import unittest\nfrom impact import plugins\n\nimport numpy\nimport sys\nimport os\nimport unittest\nimport warnings\n\nfrom impact.plugins.core import FunctionProvider\nfrom impact.plugins.core import requirements_collect\nfrom impact.plugins.core import requirement_check\nfrom impact.plugins.core import requirements_met\nfrom impact.plugins.core import get_plugins\nfrom impact.plugins.core import compatible_layers\n\n\nclass BasicFunction(FunctionProvider):\n \"\"\"Risk plugin for testing\n\n :author Allen\n :rating 1\n :param requires category==\"hazard\"\n :param requires unit==\"mmi\"\n \"\"\"\n\n @staticmethod\n def run(H, E,\n a=0.97429, b=11.037):\n\n return None\n\n\nclass SyntaxErrorFunction(FunctionProvider):\n \"\"\"Risk plugin for testing\n\n :author Allen\n :rating 1\n :param requires category==\"hazard\"\n :param requires unit=\"mmi\" #Note the error should be ==\n \"\"\"\n\n @staticmethod\n def run(H, E,\n a=0.97429, b=11.037):\n return None\n\n\nclass Test_plugin_core(unittest.TestCase):\n \"\"\"Tests of Risiko calculations\n \"\"\"\n\n def test_basic_plugin_requirements(self):\n \"\"\"Basic plugin requirements collection\n \"\"\"\n requirelines = requirements_collect(BasicFunction)\n params = {'category': 'hazard', 'unit': 'mmi'}\n assert requirements_met(requirelines, params)\n\n params = {'category': 'exposure', 'unit': 'mmi2'}\n assert requirements_met(requirelines, params, True) == False\n\n def test_basic_plugin_requirements_met(self):\n \"\"\"Basic plugin requirements met\n \"\"\"\n requirelines = requirements_collect(BasicFunction)\n valid_return = ['category==\"hazard\"', 'unit==\"mmi\"']\n for ret1, ret2 in zip(valid_return, requirelines):\n assert ret1 
== ret2, \"Error in requirements extraction\"\n\n def test_basic_requirements_check(self):\n \"\"\"Basic plugin requirements check\n \"\"\"\n requirelines = requirements_collect(BasicFunction)\n params = {'category': 'exposure'}\n for line in requirelines:\n check = requirement_check(params, line)\n assert check == False\n\n line = \"unit='mmi'\"\n params = {'category': 'exposure'}\n msg = 'Malformed statement (logged)'\n assert requirement_check(params, line) == False, msg\n #self.assertRaises(SyntaxError, requirement_check, params, line)\n\n def test_keywords_error(self):\n \"\"\" Handling of reserved python keywords \"\"\"\n line = \"unit=='mmi'\"\n params = {'class': 'myclass'}\n msg = 'Reserved keyword in statement (logged)'\n assert requirement_check(params, line) == False, msg\n\nif __name__ == '__main__':\n os.environ['DJANGO_SETTINGS_MODULE'] = 'risiko.settings'\n suite = unittest.makeSuite(Test_plugin_core, 'test')\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite)\n" }, { "alpha_fraction": 0.487366259098053, "alphanum_fraction": 0.5174140930175781, "avg_line_length": 38.576576232910156, "blob_id": "1bedddb3881229468783f0760d19358b58bc3123", "content_id": "9eb7caa5a32317735fb5eb70d3fa5a0ee4d69f4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13179, "license_type": "no_license", "max_line_length": 195, "num_lines": 333, "path": "/impact/plugins/earthquake/padang_building_impact_model.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "\"\"\"Impact function based on Padang 2009 post earthquake survey\n\nThis impact function estimates percentual damage to buildings as a\nfunction of ground shaking measured in MMI.\nBuildings are assumed to fall the 9 classes below as described in\nthe Geoscience Australia/ITB 2009 Padang earthquake\nsurvey (http://trove.nla.gov.au/work/38470066).\n\nClass Building Type Median (MMI) Beta (MMI)\n-------------------------------------------------------------------------\n1 URM with river rock walls 7.5 0.11\n2 URM with Metal Roof 8.3 0.1\n3 Timber frame with masonry in-fill 8.8 0.11\n4 RC medium rise Frame with Masonry in-fill walls 8.4 0.05\n5 Timber frame with stucco in-fill 9.2 0.11\n6 Concrete Shear wall high rise* Hazus C2H 9.7 0.15\n7 RC low rise Frame with Masonry in-fill walls 9 0.08\n8 Confined Masonry 8.9 0.07\n9 Timber frame residential 10.5 0.15\n\"\"\"\n\nfrom django.template.loader import render_to_string\nfrom impact.plugins.core import FunctionProvider\nfrom impact.storage.vector import Vector\nfrom django.utils.translation import ugettext as _\nfrom impact.plugins.utilities import PointZoomSize\nfrom impact.plugins.utilities import PointClassColor\nfrom impact.plugins.utilities import PointSymbol\nfrom impact.plugins.mappings import osm2padang\nimport scipy.stats\n\n\n# Damage curves for each of the nine classes derived from the Padang survey\ndamage_curves = {'1': dict(median=7.5, beta=0.11),\n '2': dict(median=8.3, beta=0.1),\n '3': dict(median=8.8, beta=0.11),\n '4': dict(median=8.4, beta=0.05),\n '5': dict(median=9.2, beta=0.11),\n '6': dict(median=9.7, beta=0.15),\n '7': dict(median=9.0, beta=0.08),\n '8': dict(median=8.9, beta=0.07),\n '9': dict(median=10.5, beta=0.15)}\n\n\nclass PadangEarthquakeBuildingDamageFunction(FunctionProvider):\n \"\"\"Risk plugin for Padang earthquake damage to buildings\n\n :param requires category=='hazard' and \\\n subcategory.startswith('earthquake') and \\\n layer_type=='raster'\n :param requires 
category=='exposure' and \\\n subcategory.startswith('building') and \\\n layer_type=='vector' and \\\n datatype in ['osm', 'itb']\n \"\"\"\n\n def run(self, layers):\n \"\"\"Risk plugin for earthquake school damage\n \"\"\"\n\n # Extract data\n H = layers[0] # Ground shaking\n E = layers[1] # Building locations\n\n # print\n # print 'kw', E.get_keywords()\n # print\n # FIXME (Ole): Why doesn't this layer have keywords? See issue #164\n # Need keyword identifier for each kind of building dataset.\n # if 'osm' in E.get_keywords('type'):\n # FIXME (Ole): Not very robust way of deciding\n if E.get_name().lower().startswith('osm'):\n # Map from OSM attributes to the padang building classes\n E = osm2padang(E)\n vclass_tag = 'VCLASS'\n else:\n vclass_tag = 'TestBLDGCl'\n\n # Interpolate hazard level to building locations\n H = H.interpolate(E)\n\n # Extract relevant numerical data\n coordinates = E.get_geometry()\n shaking = H.get_data()\n N = len(shaking)\n\n # List attributes to carry forward to result layer\n attributes = E.get_attribute_names()\n\n # Calculate building damage\n count50 = 0\n count25 = 0\n count10 = 0\n count0 = 0\n building_damage = []\n for i in range(N):\n mmi = float(shaking[i].values()[0])\n\n building_class = E.get_data(vclass_tag, i)\n\n building_type = str(int(building_class))\n damage_params = damage_curves[building_type]\n percent_damage = scipy.stats.lognorm.cdf(mmi,\n damage_params['beta'],\n scale=damage_params['median']) * 100\n\n # Collect shake level and calculated damage\n result_dict = {self.target_field: percent_damage,\n 'MMI': mmi}\n\n # Carry all orginal attributes forward\n for key in attributes:\n result_dict[key] = E.get_data(key, i)\n\n # Record result for this feature\n building_damage.append(result_dict)\n\n # Calculate statistics\n if percent_damage < 10:\n count0 += 1\n\n if 10 <= percent_damage < 25:\n count10 += 1\n\n if 25 <= percent_damage < 50:\n count25 += 1\n\n if 50 <= percent_damage:\n count50 += 1\n\n # Create report\n caption = ('<font size=\"3\"> <table border=\"0\" width=\"320px\">'\n ' <tr><th><b>%s</b></th><th><b>%s</b></th></th>'\n ' <tr></tr>'\n ' <tr><td>%s&#58;</td><td>%i</td></tr>'\n ' <tr><td>%s (<10%%)&#58;</td><td>%i</td></tr>'\n ' <tr><td>%s (10-25%%)&#58;</td><td>%i</td></tr>'\n ' <tr><td>%s (25-50%%)&#58;</td><td>%i</td></tr>'\n ' <tr><td>%s (50-100%%)&#58;</td><td>%i</td></tr>'\n '</table></font>' % (_('Buildings'), _('Total'),\n _('All'), N,\n _('No damage'), count0,\n _('Low damage'), count10,\n _('Medium damage'), count25,\n _('High damage'), count50))\n\n # Create vector layer and return\n V = Vector(data=building_damage,\n projection=E.get_projection(),\n geometry=coordinates,\n name='Estimated pct damage',\n keywords={'caption': caption})\n return V\n\n def generate_style(self, data):\n \"\"\"Generates and SLD file based on the data values\n \"\"\"\n\n if data.is_point_data:\n return self.generate_point_style(data)\n elif data.is_polygon_data:\n return self.generate_polygon_style(data)\n else:\n msg = 'Unknown style %s' % str(data)\n raise Exception(msg)\n\n def generate_point_style(self, data):\n \"\"\"Generates and SLD file based on the data values\n \"\"\"\n\n # Define default behaviour to be used when\n # - symbol attribute is missing\n # - attribute value is None or ''\n DEFAULT_SYMBOL = 'circle'\n\n symbol_field = None\n\n # FIXME: Replace these by dict and extend below\n symbol_keys = [None, '']\n symbol_values = [DEFAULT_SYMBOL, DEFAULT_SYMBOL]\n\n # Predefined scales and corresponding font sizes\n 
scale_keys = [10000000000, 10000000, 5000000,\n 1000000, 500000, 250000, 100000]\n scale_values = [3, 5, 8, 12, 14, 16, 18]\n\n # Predefined colour classes\n class_keys = ['No Damage', '10-25', '25-50', '50-100']\n class_values = [{'min': 0, 'max': 10,\n 'color': '#cccccc', 'opacity': '1'},\n {'min': 10, 'max': 25,\n 'color': '#fecc5c', 'opacity': '1'},\n {'min': 25, 'max': 50,\n 'color': '#fd8d3c', 'opacity': '1'},\n {'min': 50, 'max': 100,\n 'color': '#e31a1c', 'opacity': '1'}]\n\n # Definition of symbols for each attribute value\n if self.symbol_field in data.get_attribute_names():\n\n # Get actual symbol field to use\n symbol_field = self.symbol_field\n\n symbols = {'Church/Mosque': 'ttf://ESRI US MUTCD 3#0x00F6',\n 'Commercial (office)': 'ttf://ESRI Business#0x0040',\n 'Hotel': 'ttf://ESRI Public1#0x00b6',\n 'Medical facility': 'ttf://ESRI Cartography#0x00D1',\n 'Other': 'ttf://ESRI Business#0x002D',\n 'Other industrial': 'ttf://ESRI Business#0x0043',\n 'Residential': 'ttf://ESRI Cartography#0x00d7',\n 'Retail': 'ttf://Comic Sans MS#0x0024',\n 'School': 'ttf://ESRI Cartography#0x00e5',\n 'Unknown': 'ttf://Comic Sans MS#0x003F',\n 'Warehouse': 'ttf://ESRI US MUTCD 3#0x00B5'}\n else:\n symbols = {None: DEFAULT_SYMBOL, '': DEFAULT_SYMBOL}\n\n # Generate sld style file\n params = dict(name=data.get_name(),\n damage_field=self.target_field,\n symbol_field=symbol_field,\n symbols=symbols,\n scales=dict(zip(scale_keys, scale_values)),\n classifications=dict(zip(class_keys, class_values)))\n\n # The styles are in $RIAB_HOME/riab/impact/templates/impact/styles\n return render_to_string('impact/styles/point_classes.sld', params)\n\n def generate_polygon_style(self, data):\n \"\"\"Generates and SLD file based on the data values\n \"\"\"\n\n # FIXME (Ole): Return static style to start with: ticket #144\n style = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<sld:StyledLayerDescriptor xmlns=\"http://www.opengis.net/sld\" xmlns:sld=\"http://www.opengis.net/sld\" xmlns:ogc=\"http://www.opengis.net/ogc\" xmlns:gml=\"http://www.opengis.net/gml\" version=\"1.0.0\">\n <sld:NamedLayer>\n <sld:Name>earthquake_impact</sld:Name>\n <sld:UserStyle>\n <sld:Name>earthquake_impact</sld:Name>\n <sld:Title/>\n <sld:FeatureTypeStyle>\n <sld:Name>name</sld:Name>\n <sld:Rule>\n <sld:Name>1</sld:Name>\n <sld:Title>Low</sld:Title>\n <ogc:Filter>\n <ogc:PropertyIsLessThan>\n <ogc:PropertyName>%s</ogc:PropertyName>\n <ogc:Literal>10</ogc:Literal>\n </ogc:PropertyIsLessThan>\n </ogc:Filter>\n <sld:PolygonSymbolizer>\n <sld:Fill>\n <sld:CssParameter name=\"fill\">#CCCCCC</sld:CssParameter>\n </sld:Fill>\n <sld:Stroke>\n <sld:CssParameter name=\"stroke\">#BCBCBC</sld:CssParameter>\n </sld:Stroke>\n </sld:PolygonSymbolizer>\n </sld:Rule>\n <sld:Rule>\n <sld:Name>2</sld:Name>\n <sld:Title>Medium</sld:Title>\n <ogc:Filter>\n <ogc:And>\n <ogc:PropertyIsGreaterThanOrEqualTo>\n <ogc:PropertyName>%s</ogc:PropertyName>\n <ogc:Literal>10</ogc:Literal>\n </ogc:PropertyIsGreaterThanOrEqualTo>\n <ogc:PropertyIsLessThan>\n <ogc:PropertyName>%s</ogc:PropertyName>\n <ogc:Literal>25</ogc:Literal>\n </ogc:PropertyIsLessThan>\n </ogc:And>\n </ogc:Filter>\n <sld:PolygonSymbolizer>\n <sld:Fill>\n <sld:CssParameter name=\"fill\">#FECC5C</sld:CssParameter>\n </sld:Fill>\n <sld:Stroke>\n <sld:CssParameter name=\"stroke\">#EEBC4C</sld:CssParameter>\n </sld:Stroke>\n </sld:PolygonSymbolizer>\n </sld:Rule>\n <sld:Rule>\n <sld:Name>3</sld:Name>\n <sld:Title>Medium</sld:Title>\n <ogc:Filter>\n <ogc:And>\n <ogc:PropertyIsGreaterThanOrEqualTo>\n 
<ogc:PropertyName>%s</ogc:PropertyName>\n <ogc:Literal>25</ogc:Literal>\n </ogc:PropertyIsGreaterThanOrEqualTo>\n <ogc:PropertyIsLessThan>\n <ogc:PropertyName>%s</ogc:PropertyName>\n <ogc:Literal>50</ogc:Literal>\n </ogc:PropertyIsLessThan>\n </ogc:And>\n </ogc:Filter>\n <sld:PolygonSymbolizer>\n <sld:Fill>\n <sld:CssParameter name=\"fill\">#FD8D3C</sld:CssParameter>\n </sld:Fill>\n <sld:Stroke>\n <sld:CssParameter name=\"stroke\">#ED7D2C</sld:CssParameter>\n </sld:Stroke>\n </sld:PolygonSymbolizer>\n </sld:Rule>\n <sld:Rule>\n <sld:Name>4</sld:Name>\n <sld:Title>High</sld:Title>\n <ogc:Filter>\n <ogc:PropertyIsGreaterThanOrEqualTo>\n <ogc:PropertyName>%s</ogc:PropertyName>\n <ogc:Literal>50</ogc:Literal>\n </ogc:PropertyIsGreaterThanOrEqualTo>\n </ogc:Filter>\n <sld:PolygonSymbolizer>\n <sld:Fill>\n <sld:CssParameter name=\"fill\">#F31A1C</sld:CssParameter>\n </sld:Fill>\n <sld:Stroke>\n <sld:CssParameter name=\"stroke\">#E30A0C</sld:CssParameter>\n </sld:Stroke>\n </sld:PolygonSymbolizer>\n </sld:Rule>\n </sld:FeatureTypeStyle>\n </sld:UserStyle>\n </sld:NamedLayer>\n</sld:StyledLayerDescriptor>\n\"\"\" % ((self.target_field,) * 6)\n\n return style\n" }, { "alpha_fraction": 0.6047502756118774, "alphanum_fraction": 0.6104598045349121, "avg_line_length": 34.890708923339844, "blob_id": "6f7197633cdbac8366d8967bb1a9e78d3874d385", "content_id": "577662509d2561b2f1ec69fff26922956596a6f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13136, "license_type": "no_license", "max_line_length": 79, "num_lines": 366, "path": "/impact/views.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "\"\"\"\nRisk in a Box HTTP API\n\nAll API calls start with:\n http://myriab.com/riab/api/v1\n\n * Version: All API calls begin with API version.\n * Path: For this documentation, we will assume every\n request begins with the above path.\n * Units: All coordinates are in WGS-84 (EPSG:4326)\n unless otherwise specified and all units of\n measurement are in the International System\n of Units (SI).\n * Format: All calls are returned in JSON.\n * Status Codes:\n 200 Successful GET and PUT.\n 201 Successful POST.\n 202 Successful calculation queued.\n 204 Successful DELETE\n 401 Unauthenticated.\n 409 Unsuccessful POST, PUT, or DELETE\n (Will return an errors object).\n\"\"\"\nfrom __future__ import division\n\nimport sys\nimport inspect\nimport datetime\n\nfrom django.utils import simplejson as json\nfrom django.http import HttpResponse\nfrom django.conf import settings\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom impact.storage.io import dummy_save, download\nfrom impact.storage.io import get_metadata, get_layer_descriptors\nfrom impact.storage.io import bboxlist2string\nfrom impact.storage.io import save_to_geonode\nfrom impact.storage.utilities import titelize\nfrom impact.plugins.core import get_plugin, get_plugins, compatible_layers\nfrom impact.engine.core import calculate_impact\nfrom impact.engine.core import get_common_resolution, get_bounding_boxes\nfrom impact.models import Calculation, Workspace\n\nfrom geonode.maps.utils import get_valid_user\n\nfrom urlparse import urljoin\n\nimport logging\nlogger = logging.getLogger('risiko')\n\n\ndef exception_format(e):\n \"\"\"Convert an exception object into a string,\n complete with stack trace info, suitable for display.\n \"\"\"\n import traceback\n info = ''.join(traceback.format_tb(sys.exc_info()[2]))\n return str(e) + '\\n\\n' + info\n\n\n@csrf_exempt\ndef 
calculate(request, save_output=save_to_geonode):\n start = datetime.datetime.now()\n\n if request.method == 'GET':\n # FIXME: Add a basic form here to be able to generate the POST request.\n return HttpResponse('This should be accessed by robots, not humans.'\n 'In other words using HTTP POST instead of GET.')\n elif request.method == 'POST':\n data = request.POST\n impact_function_name = data['impact_function']\n hazard_server = data['hazard_server']\n hazard_layer = data['hazard']\n exposure_server = data['exposure_server']\n exposure_layer = data['exposure']\n requested_bbox = data['bbox']\n keywords = data['keywords']\n\n if request.user.is_anonymous():\n theuser = get_valid_user()\n else:\n theuser = request.user\n\n # Create entry in database\n calculation = Calculation(user=theuser,\n run_date=start,\n hazard_server=hazard_server,\n hazard_layer=hazard_layer,\n exposure_server=exposure_server,\n exposure_layer=exposure_layer,\n impact_function=impact_function_name,\n success=False)\n\n # Wrap main computation loop in try except to catch and present\n # messages and stack traces in the application\n try:\n # Get metadata\n haz_metadata = get_metadata(hazard_server, hazard_layer)\n exp_metadata = get_metadata(exposure_server, exposure_layer)\n\n # Determine common resolution in case of raster layers\n raster_resolution = get_common_resolution(haz_metadata, exp_metadata)\n\n # Get reconciled bounding boxes\n haz_bbox, exp_bbox, imp_bbox = get_bounding_boxes(haz_metadata,\n exp_metadata,\n requested_bbox)\n\n # Record layers to download\n download_layers = [(hazard_server, hazard_layer, haz_bbox),\n (exposure_server, exposure_layer, exp_bbox)]\n\n # Add linked layers if any FIXME: STILL TODO!\n\n # Get selected impact function\n impact_function = get_plugin(impact_function_name)\n impact_function_source = inspect.getsource(impact_function)\n\n # Record information calculation object and save it\n calculation.impact_function_source = impact_function_source\n calculation.bbox = bboxlist2string(imp_bbox)\n calculation.save()\n\n # Start computation\n msg = 'Performing requested calculation'\n logger.info(msg)\n\n # Download selected layer objects\n layers = []\n for server, layer_name, bbox in download_layers:\n msg = ('- Downloading layer %s from %s'\n % (layer_name, server))\n logger.info(msg)\n L = download(server, layer_name, bbox, raster_resolution)\n layers.append(L)\n\n # Calculate result using specified impact function\n msg = ('- Calculating impact using %s' % impact_function)\n logger.info(msg)\n impact_filename = calculate_impact(layers=layers,\n impact_fcn=impact_function)\n\n # Upload result to internal GeoServer\n msg = ('- Uploading impact layer %s' % impact_filename)\n logger.info(msg)\n result = save_output(impact_filename,\n title='output_%s' % start.isoformat(),\n user=theuser)\n except Exception, e:\n # FIXME: Reimplement error saving for calculation.\n # FIXME (Ole): Why should we reimplement?\n # This is dangerous. Try to raise an exception\n # e.g. in get_metadata_from_layer. Things will silently fail.\n # See issue #170\n\n logger.error(e)\n errors = e.__str__()\n trace = exception_format(e)\n calculation.errors = errors\n calculation.stacktrace = trace\n calculation.save()\n jsondata = json.dumps({'errors': errors, 'stacktrace': trace})\n return HttpResponse(jsondata, mimetype='application/json')\n\n msg = ('- Result available at %s.' 
% result.get_absolute_url())\n logger.info(msg)\n\n calculation.layer = urljoin(settings.SITEURL, result.get_absolute_url())\n calculation.success = True\n calculation.save()\n\n output = calculation.__dict__\n\n # json.dumps does not like datetime objects,\n # let's make it a json string ourselves\n output['run_date'] = 'new Date(\"%s\")' % calculation.run_date\n\n # FIXME: This should not be needed in an ideal world\n ows_server_url = settings.GEOSERVER_BASE_URL + 'ows',\n output['ows_server_url'] = ows_server_url\n\n # json.dumps does not like django users\n output['user'] = calculation.user.username\n downloads = result.download_links()\n keys = [x[0] for x in downloads]\n values = [x[2] for x in downloads]\n download_dict = dict(zip(keys, values))\n if 'excel' in keys:\n output['excel'] = download_dict['excel']\n\n # Keywords do not like caption being there.\n # FIXME: Do proper parsing, don't assume caption is the only keyword.\n if 'caption' in result.keywords:\n caption = result.keywords.split('caption:')[1]\n # FIXME (Ole): Return underscores to spaces that was put in place\n # to store it in the first place. See issue #148\n output['caption'] = caption.replace('_', ' ')\n else:\n output['caption'] = 'Calculation finished ' \\\n 'in %s' % calculation.run_duration\n\n # Delete _state and _user_cache item from the dict,\n # they were created automatically by Django\n del output['_user_cache']\n del output['_state']\n\n # If success == True and errors = '' ...\n # ... let's make errors=None for backwards compat\n if output['success'] and len(output['errors']) == 0:\n output['errors'] = None\n\n jsondata = json.dumps(output)\n return HttpResponse(jsondata, mimetype='application/json')\n\n\ndef debug(request):\n \"\"\"Show a list of all the functions\"\"\"\n plugin_list = get_plugins()\n\n plugins_info = []\n for name, f in plugin_list.items():\n if not 'doc' in request.GET:\n plugins_info.append({\n 'name': name,\n 'location': f.__module__,\n })\n else:\n plugins_info.append({\n 'name': name,\n 'location': f.__module__,\n 'doc': f.__doc__,\n })\n\n output = {'plugins': plugins_info}\n jsondata = json.dumps(output)\n return HttpResponse(jsondata, mimetype='application/json')\n\n\ndef functions(request):\n \"\"\"Get a list of all the functions\n\n Will provide a list of plugin functions and the layers that\n the plugins will work with. Takes geoserver urls as a GET\n parameter can have a comma separated list\n\n e.g. 
http://127.0.0.1:8000/riab/api/v1/functions/?geoservers=http:...\n assumes version 1.0.0\n \"\"\"\n\n plugin_list = get_plugins()\n\n if 'geoservers' in request.GET:\n # FIXME for the moment assume version 1.0.0\n geolist = request.GET['geoservers'].split(',')\n geoservers = [{'url': geoserver, 'version': '1.0.0'}\n for geoserver in geolist]\n else:\n geoservers = get_servers(request.user)\n\n # Iterate across all available geoservers and return all\n # layer descriptors for use with the plugin subsystem\n layer_descriptors = []\n for geoserver in geoservers:\n layer_descriptors.extend(\n get_layer_descriptors(geoserver['url']))\n\n # For each plugin return all layers that meet the requirements\n # an empty layer is returned where the plugin cannot run\n annotated_plugins = []\n for name, f in plugin_list.items():\n layers = compatible_layers(f, layer_descriptors)\n\n annotated_plugins.append({'name': name,\n 'doc': f.__doc__,\n 'layers': layers})\n\n output = {'functions': annotated_plugins}\n jsondata = json.dumps(output)\n return HttpResponse(jsondata, mimetype='application/json')\n\n\ndef get_servers(user):\n \"\"\" Gets the list of servers for a given user\n \"\"\"\n if user.is_anonymous():\n theuser = get_valid_user()\n else:\n theuser = user\n try:\n workspace = Workspace.objects.get(user=theuser)\n except Workspace.DoesNotExist:\n workspace = Workspace.objects.get(user__username='default')\n servers = workspace.servers.all()\n geoservers = [{'url': settings.GEOSERVER_BASE_URL + 'ows',\n 'name': 'Local Geoserver',\n 'version': '1.0.0', 'id':0}]\n for server in servers:\n # TODO for the moment assume version 1.0.0\n geoservers.append({'url': server.url,\n 'name': server.name,\n 'id': server.id,\n 'version': '1.0.0'})\n\n return geoservers\n\n\ndef servers(request):\n \"\"\" Get the list of all the servers registered for a given user.\n\n If no user is passed, it will use a default one.\n \"\"\"\n geoservers = get_servers(request.user)\n output = {'servers': geoservers}\n jsondata = json.dumps(output)\n return HttpResponse(jsondata, mimetype='application/json')\n\n\ndef layers(request):\n \"\"\" Get the list of all layers annotated with metadata\n\n If a parameter called 'category' is passed, it will be\n used to filter the list.\n \"\"\"\n\n # FIXME (Ole): Why does the word 'category' have a special meaning?\n # Someone, please revisit this code!\n\n geoservers = get_servers(request.user)\n\n if 'category' in request.REQUEST:\n requested_category = request.REQUEST['category']\n else:\n requested_category = None\n\n # Iterate across all available geoservers and all layer descriptors\n layer_descriptors = []\n for geoserver in geoservers:\n ld = get_layer_descriptors(geoserver['url'])\n for layer in ld:\n out = {'name': layer[0],\n 'title': titelize(layer[1]['title']),\n 'server_url': geoserver['url']}\n metadata = layer[1]\n name_category = out['name'].split('_')\n if 'category' in metadata.keys():\n category = metadata['category']\n elif len(name_category) > 1:\n # FIXME: This is a temporary measure until we get the keywords:\n # https://github.com/AIFDR/riab/issues/46\n # If there is no metadata then try using format category_name\n # FIXME (Ole): This section should definitely be cleaned up\n # FIXME (Ole): CLEAN IT - NOW!!!\n category = name_category[0]\n else:\n category = None\n\n if requested_category is not None:\n if requested_category == category:\n layer_descriptors.append(out)\n else:\n layer_descriptors.append(out)\n\n output = {'objects': layer_descriptors}\n jsondata = 
json.dumps(output)\n return HttpResponse(jsondata, mimetype='application/json')\n" }, { "alpha_fraction": 0.7108238935470581, "alphanum_fraction": 0.7108238935470581, "avg_line_length": 30.316455841064453, "blob_id": "1811e0a51eb64c6170559fcbf05b27c3516111c4", "content_id": "7f409fb79c7f3943252a8e275fb4cbf3ee1f47ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2476, "license_type": "no_license", "max_line_length": 223, "num_lines": 79, "path": "/docs/development/documentation.rst", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "======================================\nGuide to Contributing to Documentation\n======================================\n\nIntroduction\n------------\nThe documentation you are viewing is produced in Sphinx [http://sphinx.pocoo.org/] and uses ReStructured Text (ReST). To find examples of how to write in ReST for Sphinx please vist the above Sphinx web page.\n\nRead-the-docs\n-------------\nThe documentation for this project is automatically made by the service Read-the-Docs at [http://risiko.readthedocs.org] from the risiko git repoistory docs directory. ::\n\n https://github.com/AIFDR/riab/tree/develop/docs\n\n\n.. note::\n\n if Read-the-docs does not update this can be done manually from the dashboard at [http://readthedocs.org/projects/risiko or http://readthedocs.org/projects/risiko_dev]. Click on the bottom button \"Build Latest Version\".\n\nDevelopment Documentation\n-------------------------\n\nYou can edit the docs straight from the git repository or after checking the repository out, making changes and pushing these changes back to github.\n\nChanges reflected in the development branch [http://readthedocs.org/projects/risiko_dev] can be found at http://risiko_dev.readthedocs.org.\n\nCurrent Stable Version\n----------------------\n\nDocumentation in the master branch [http://readthedocs.org/projects/risiko/] can be found at http://risiko.readthedocs.org.\n\n\nMaking the documentation\n------------------------\nTo manually make the documentation go to the Risiko docs directory and use...\n\n* For html::\n\n make html\n\n* For latex::\n\n make latexpdf\n\n\n.. note::\n\n In order to make the pdf documentation you will need to install the tex support. 
On Ubuntu you can use:\n\n sudo apt-get install texlive-full\n\n\nTo view the html documentation go to::\n\n [your Risiko install path]/docs/.build/html/index.html\n\nFor the pdf docs they are in::\n\n [your Risiko install path]/docs/.build/latex/riab.pdf\n\n\n\nDocumentation Structure\n-----------------------\n\nThe folders follow the main documentation sections\n\n* Intro: Anything to do with getting started with Risiko including quick-start installation and FAQ.\n\n* Usage: How to use Risiko, including tutorials and information on building plugins.\n - Plugins: Information about developing plugins\n\n* Development: Information to help any serious Risiko developers, includes architecture and coding information.\n\n* Deployment: How to deploy Risiko to various platforms.\n\nAlso:\n\n* Images: All the images used in the documentation\n\n\n" }, { "alpha_fraction": 0.58255934715271, "alphanum_fraction": 0.5892673134803772, "avg_line_length": 34.23636245727539, "blob_id": "5321f1053ab99f029fb842b65000ac4cbb3c041b", "content_id": "fa22b4479d379172f2a1facf5d934ebcf2508b29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1938, "license_type": "no_license", "max_line_length": 78, "num_lines": 55, "path": "/impact/plugins/tephra/ashload_population_impact.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "import numpy\n\nfrom impact.plugins.core import FunctionProvider\nfrom impact.storage.raster import Raster\n\n\nclass TephraPopulationImpactFunction(FunctionProvider):\n \"\"\"Risk plugin for flood impact\n\n :author HKV\n :rating 1\n :param requires category=='hazard' and \\\n subcategory.startswith('tephra') and \\\n layer_type=='raster'\n :param requires category=='exposure' and \\\n subcategory.startswith('population') and \\\n layer_type=='raster'\n \"\"\"\n\n @staticmethod\n def run(layers):\n \"\"\"Risk plugin for earthquake fatalities\n\n Input\n layers: List of layers expected to contain\n H: Raster layer of flood depth\n P: Raster layer of population data on the same grid as H\n \"\"\"\n\n threshold = 1 # Load above which people are regarded affected [kg/m2]\n\n # Identify hazard and exposure layers\n inundation = layers[0] # Tephra load [kg/m2]\n population = layers[1] # Population density [people/km^2]\n\n # Extract data as numeric arrays\n D = inundation.get_data(nan=0.0) # Depth\n P = population.get_data(nan=0.0, scaling=True) # Population density\n\n # Calculate impact as population exposed to depths > threshold\n I = numpy.where(D > threshold, P, 0)\n\n # Generate text with result for this study\n number_of_people_affected = numpy.nansum(I.flat)\n caption = ('%i people affected by ash levels greater '\n 'than %i kg/m^2' % (number_of_people_affected,\n threshold))\n\n # Create raster object and return\n R = Raster(I,\n projection=inundation.get_projection(),\n geotransform=inundation.get_geotransform(),\n name='People affected',\n keywords={'caption': caption})\n return R\n" }, { "alpha_fraction": 0.5822954773902893, "alphanum_fraction": 0.60854572057724, "avg_line_length": 32.348243713378906, "blob_id": "b65c55de762675ca8e41c6b8ead6fe92e80c6b60", "content_id": "6b4fed8751ae8f677b2108e6f83f37e781873fa3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10438, "license_type": "no_license", "max_line_length": 79, "num_lines": 313, "path": "/impact/engine/interpolation2d.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "\"\"\"Module 2D 
interpolation over a rectangular mesh\n\nThis module\n* provides piecewise constant (nearest neighbour) and bilinear interpolation\n* is fast (based on numpy vector operations)\n* depends only on numpy\n* guarantees that interpolated values never exceed the four nearest neighbours\n* handles missing values in domain sensibly using NaN\n* is unit tested with a range of common and corner cases\n\nSee end of this file for documentation of the mathematical derivation used.\n\"\"\"\n\nimport numpy\n\n\ndef interpolate2d(x, y, Z, points, mode='linear', bounds_error=False):\n \"\"\"Fundamental 2D interpolation routine\n\n Input\n x: 1D array of x-coordinates of the mesh on which to interpolate\n y: 1D array of y-coordinates of the mesh on which to interpolate\n Z: 2D array of values for each x, y pair\n points: Nx2 array of coordinates where interpolated values are sought\n mode: Determines the interpolation order. Options are\n 'constant' - piecewise constant nearest neighbour interpolation\n 'linear' - bilinear interpolation using the four\n nearest neighbours (default)\n bounds_error: Boolean flag. If True (default) an exception will\n be raised when interpolated values are requested\n outside the domain of the input data. If False, nan\n is returned for those values\n Output\n 1D array with same length as points with interpolated values\n\n Notes\n Input coordinates x and y are assumed to be monotonically increasing,\n but need not be equidistantly spaced.\n\n Z is assumed to have dimension M x N, where M = len(x) and N = len(y).\n In other words it is assumed that the x values follow the first\n (vertical) axis downwards and y values the second (horizontal) axis\n from left to right.\n\n If this routine is to be used for interpolation of raster grids where\n data is typically organised with longitudes (x) going from left to\n right and latitudes (y) from left to right then user\n interpolate_raster in this module\n \"\"\"\n\n # Input checks\n x, y, Z, xi, eta = check_inputs(x, y, Z, points, mode, bounds_error)\n\n # If there is only one pixel, assign that value to all points\n #if len(x) == 1 and len(y) == 1:\n # return numpy.array([Z[0, 0]] * len(points))\n\n # Identify elements that are outside interpolation domain or NaN\n outside = (xi < x[0]) + (eta < y[0]) + (xi > x[-1]) + (eta > y[-1])\n outside += numpy.isnan(xi) + numpy.isnan(eta)\n\n inside = -outside\n xi = xi[inside]\n eta = eta[inside]\n\n # Find upper neighbours for each interpolation point\n idx = numpy.searchsorted(x, xi, side='left')\n idy = numpy.searchsorted(y, eta, side='left')\n\n # Internal check (index == 0 is OK)\n msg = ('Interpolation point outside domain. This should never happen. 
'\n 'Please email [email protected]')\n if len(idx) > 0:\n assert max(idx) < len(x), msg\n if len(idy) > 0:\n assert max(idy) < len(y), msg\n\n # Get the four neighbours for each interpolation point\n x0 = x[idx - 1]\n x1 = x[idx]\n y0 = y[idy - 1]\n y1 = y[idy]\n\n z00 = Z[idx - 1, idy - 1]\n z01 = Z[idx - 1, idy]\n z10 = Z[idx, idy - 1]\n z11 = Z[idx, idy]\n\n # Coefficients for weighting between lower and upper bounds\n alpha = (xi - x0) / (x1 - x0)\n beta = (eta - y0) / (y1 - y0)\n\n if mode == 'linear':\n # Bilinear interpolation formula\n dx = z10 - z00\n dy = z01 - z00\n z = z00 + alpha * dx + beta * dy + alpha * beta * (z11 - dx - dy - z00)\n else:\n # Piecewise constant (as verified in input_check)\n\n # Set up masks for the quadrants\n left = alpha < 0.5\n right = -left\n lower = beta < 0.5\n upper = -lower\n\n lower_left = lower * left\n lower_right = lower * right\n upper_left = upper * left\n\n # Initialise result array with all elements set to upper right\n z = z11\n\n # Then set the other quadrants\n z[lower_left] = z00[lower_left]\n z[lower_right] = z10[lower_right]\n z[upper_left] = z01[upper_left]\n\n # Self test\n if len(z) > 0:\n mz = numpy.nanmax(z)\n mZ = numpy.nanmax(Z)\n msg = ('Internal check failed. Max interpolated value %.15f '\n 'exceeds max grid value %.15f ' % (mz, mZ))\n if not(numpy.isnan(mz) or numpy.isnan(mZ)):\n assert mz <= mZ, msg\n\n # Populate result with interpolated values for points inside domain\n # and NaN for values outside\n r = numpy.zeros(len(points))\n r[inside] = z\n r[outside] = numpy.nan\n\n return r\n\n\ndef interpolate_raster(x, y, Z, points, mode='linear', bounds_error=False):\n \"\"\"2D interpolation of raster data\n\n It is assumed that data is organised in matrix Z as latitudes from\n bottom up along the first dimension and longitudes from west to east\n along the second dimension.\n\n Further it is assumed that x is the vector of longitudes and y the\n vector of latitudes.\n\n See interpolate2d for details of the interpolation routine\n \"\"\"\n\n # Flip matrix Z up-down so that scipy will interpret latitudes correctly.\n Z = numpy.flipud(Z)\n\n # Transpose Z to have y coordinates along the first axis and x coordinates\n # along the second axis\n Z = Z.transpose()\n\n # Call underlying interpolation routine and return\n res = interpolate2d(x, y, Z, points, mode=mode, bounds_error=bounds_error)\n return res\n\n\ndef check_inputs(x, y, Z, points, mode, bounds_error):\n \"\"\"Check inputs for interpolate2d function\n \"\"\"\n\n msg = 'Only mode \"linear\" and \"constant\" are implemented. I got %s' % mode\n assert mode in ['linear', 'constant'], msg\n\n try:\n x = numpy.array(x)\n except Exception, e:\n msg = ('Input vector x could not be converted to numpy array: '\n '%s' % str(e))\n raise Exception(msg)\n\n try:\n y = numpy.array(y)\n except Exception, e:\n msg = ('Input vector y could not be converted to numpy array: '\n '%s' % str(e))\n raise Exception(msg)\n\n msg = ('Input vector x must be monotoneously increasing. I got '\n 'min(x) == %.15f, but x[0] == %.15f' % (min(x), x[0]))\n assert min(x) == x[0], msg\n\n msg = ('Input vector y must be monotoneously increasing. '\n 'I got min(y) == %.15f, but y[0] == %.15f' % (min(y), y[0]))\n assert min(y) == y[0], msg\n\n msg = ('Input vector x must be monotoneously increasing. I got '\n 'max(x) == %.15f, but x[-1] == %.15f' % (max(x), x[-1]))\n assert max(x) == x[-1], msg\n\n msg = ('Input vector y must be monotoneously increasing. 
I got '\n 'max(y) == %.15f, but y[-1] == %.15f' % (max(y), y[-1]))\n assert max(y) == y[-1], msg\n\n try:\n Z = numpy.array(Z)\n m, n = Z.shape\n except Exception, e:\n msg = 'Z must be a 2D numpy array: %s' % str(e)\n raise Exception(msg)\n\n Nx = len(x)\n Ny = len(y)\n msg = ('Input array Z must have dimensions %i x %i corresponding to the '\n 'lengths of the input coordinates x and y. However, '\n 'Z has dimensions %i x %i.' % (Nx, Ny, m, n))\n assert Nx == m, msg\n assert Ny == n, msg\n\n # Get interpolation points\n points = numpy.array(points)\n xi = points[:, 0]\n eta = points[:, 1]\n\n if bounds_error:\n msg = ('Interpolation point %f was less than the smallest value in '\n 'domain %f and bounds_error was requested.' % (xi[0], x[0]))\n if xi[0] < x[0]:\n raise Exception(msg)\n\n msg = ('Interpolation point %f was greater than the largest value in '\n 'domain %f and bounds_error was requested.' % (xi[-1], x[-1]))\n if xi[-1] > x[-1]:\n raise Exception(msg)\n\n msg = ('Interpolation point %f was less than the smallest value in '\n 'domain %f and bounds_error was requested.' % (eta[0], y[0]))\n if eta[0] < y[0]:\n raise Exception(msg)\n\n msg = ('Interpolation point %f was greater than the largest value in '\n 'domain %f and bounds_error was requested.' % (eta[-1], y[-1]))\n if eta[-1] > y[-1]:\n raise Exception(msg)\n\n return x, y, Z, xi, eta\n\n\"\"\"\nBilinear interpolation is based on the standard 1D linear interpolation\nformula:\n\nGiven points (x0, y0) and (x1, x0) and a value of x where x0 <= x <= x1,\nthe linearly interpolated value y at x is given as\n\nalpha*(y1-y0) + y0\n\nor\n\nalpha*y1 + (1-alpha)*y0 (1)\n\nwhere alpha = (x-x0)/(x1-x0) (1a)\n\n\n2D bilinear interpolation aims at obtaining an interpolated value z at a point\n(x,y) which lies inside a square formed by points (x0, y0), (x1, y0),\n(x0, y1) and (x1, y1) for which values z00, z10, z01 and z11 are known.\n\nThis obtained be first applying equation (1) twice in in the x-direction\nto obtain interpolated points q0 and q1 for (x, y0) and (x, y1), respectively.\n\nq0 = alpha*z10 + (1-alpha)*z00 (2)\n\nand\n\nq1 = alpha*z11 + (1-alpha)*z01 (3)\n\n\nThen using equation (1) in the y-direction on the results from (2) and (3)\n\nz = beta*q1 + (1-beta)*q0 (4)\n\nwhere beta = (y-y0)/(y1-y0) (4a)\n\n\nSubstituting (2) and (3) into (4) yields\n\nz = alpha*beta*z11 + beta*z01 - alpha*beta*z01 +\n alpha*z10 + z00 - alpha*z00 - alpha*beta*z10 - beta*z00 +\n alpha*beta*z00\n = alpha*beta*(z11 - z01 - z10 + z00) +\n alpha*(z10 - z00) + beta*(z01 - z00) + z00\n\nwhich can be further simplified to\n\nz = alpha*beta*(z11 - dx - dy - z00) + alpha*dx + beta*dy + z00 (5)\n\nwhere\ndx = z10 - z00\ndy = z01 - z00\n\nEquation (5) is what is implemented in the function interpolate2d above.\n\n\nPiecewise constant interpolation can be implemented using the same coefficients\n(1a) and (4a) that are used for bilinear interpolation as they are a measure of\nthe relative distance to the left and lower neigbours. A value of 0 will pick\nthe left or lower bound whereas a value of 1 will pick the right or higher\nbound. 
Hence z can be assigned to its nearest neigbour as follows\n\n | z00 alpha < 0.5 and beta < 0.5 # lower left corner\n |\n | z10 alpha >= 0.5 and beta < 0.5 # lower right corner\nz = |\n | z01 alpha < 0.5 and beta >= 0.5 # upper left corner\n |\n | z11 alpha >= 0.5 and beta >= 0.5 # upper right corner\n\n\n\"\"\"\n" }, { "alpha_fraction": 0.5224738121032715, "alphanum_fraction": 0.5395138263702393, "avg_line_length": 38.96190643310547, "blob_id": "06a52a7672c256038e8a46929e6065c33313444c", "content_id": "20c218f4ce7ae6724cb302da8d4e4ed6f6d7de63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 41960, "license_type": "no_license", "max_line_length": 79, "num_lines": 1050, "path": "/impact/tests/test_calculations.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "import numpy\nimport os\nimport sys\nimport unittest\nimport warnings\n\nfrom django.test.client import Client\nfrom django.conf import settings\nfrom django.utils import simplejson as json\n\nfrom geonode.maps.utils import get_valid_user, check_geonode_is_up\n\nfrom impact.views import calculate\n\nfrom impact.storage.io import save_to_geonode, check_layer\nfrom impact.storage.io import assert_bounding_box_matches\nfrom impact.storage.io import download\nfrom impact.storage.io import get_bounding_box\nfrom impact.storage.io import get_bounding_box_string\nfrom impact.storage.io import read_layer\nfrom impact.storage.io import get_metadata\n\nfrom impact.storage.utilities import nanallclose\n\nfrom impact.tests.utilities import TESTDATA, INTERNAL_SERVER_URL\nfrom owslib.wcs import WebCoverageService\n\n\ndef lembang_damage_function(x):\n if x < 6.0:\n value = 0.0\n else:\n value = (0.692 * (x ** 4) -\n 15.82 * (x ** 3) +\n 135.0 * (x ** 2) -\n 509.0 * x +\n 714.4)\n return value\n\n\nclass Test_calculations(unittest.TestCase):\n \"\"\"Tests of Risiko calculations\n \"\"\"\n\n def setUp(self):\n \"\"\"Create valid superuser\n \"\"\"\n self.user = get_valid_user()\n\n def test_io(self):\n \"\"\"Data can be uploaded and downloaded from internal GeoServer\n \"\"\"\n\n # Upload a raster and a vector data set\n for filename in ['population_padang_1.asc', 'lembang_schools.shp']:\n basename, ext = os.path.splitext(filename)\n filename = os.path.join(TESTDATA, filename)\n\n layer = save_to_geonode(filename, user=self.user, overwrite=True)\n\n # Name checking\n layer_name = layer.name\n expected_name = basename.lower()\n msg = 'Expected layername %s but got %s' % (expected_name,\n layer_name)\n assert layer_name == expected_name, msg\n\n workspace = layer.workspace\n\n msg = 'Expected workspace to be \"geonode\". 
Got %s' % workspace\n assert workspace == 'geonode'\n\n # Check metadata\n assert_bounding_box_matches(layer, filename)\n\n # Download layer again using workspace:name\n bbox = get_bounding_box(filename)\n downloaded_layer = download(INTERNAL_SERVER_URL,\n '%s:%s' % (workspace, layer_name),\n bbox)\n assert os.path.exists(downloaded_layer.filename)\n\n # FIXME (Ole): I wan't to check that the resolution is as expected\n # in case of raster layers.\n\n # FIXME (Ole): Bring this test back when issue:39 has been resolved\n # Check that exception is raised when using name without workspace\n #try:\n # downloaded_layer = download(INTERNAL_SERVER_URL,\n # layer_name,\n # bbox)\n #except AssertionError, e:\n # expected_error = 'Layer must have the format \"workspace:name\"'\n # msg = ('Exception was raised but error message was: %s\\n'\n # 'I expected error message: %s...' % (e,\n # expected_error))\n # assert str(e).startswith(expected_error), msg\n #else:\n # msg = ('Assertion error should have been raised for layer '\n # 'name %s which is not preceded by workspace'\n # % layer_name)\n # raise Exception(msg)\n\n # Check handling of invalid workspace name\n #try:\n # downloaded_layer = download(INTERNAL_SERVER_URL,\n # 'glokurp:%s' % layer_name,\n # bbox)\n #except:\n # msg = 'Write exception handling of invalid workspace name'\n # print msg\n # #raise Exception(msg)\n\n def test_the_earthquake_fatality_estimation_allen(self):\n \"\"\"Fatality computation computed correctly with GeoServer Data\n \"\"\"\n\n # Simulate bounding box from application\n viewport_bbox_string = '104.3,-8.2,110.04,-5.17'\n\n # Upload exposure data for this test\n name = 'Population_2010'\n exposure_filename = '%s/%s.asc' % (TESTDATA, name)\n exposure_layer = save_to_geonode(exposure_filename,\n user=self.user, overwrite=True)\n\n workspace = exposure_layer.workspace\n msg = 'Expected workspace to be \"geonode\". Got %s' % workspace\n assert workspace == 'geonode'\n\n layer_name = exposure_layer.name\n msg = 'Expected layer name to be \"%s\". 
Got %s' % (name, layer_name)\n assert layer_name == name.lower(), msg\n\n exposure_name = '%s:%s' % (workspace, layer_name)\n\n # Check metadata\n assert_bounding_box_matches(exposure_layer, exposure_filename)\n exp_bbox_string = get_bounding_box_string(exposure_filename)\n check_layer(exposure_layer, full=True)\n\n # Now we know that exposure layer is good, lets upload some\n # hazard layers and do the calculations\n filename = 'Lembang_Earthquake_Scenario.asc'\n\n # Save\n hazard_filename = '%s/%s' % (TESTDATA, filename)\n hazard_layer = save_to_geonode(hazard_filename,\n user=self.user, overwrite=True)\n hazard_name = '%s:%s' % (hazard_layer.workspace,\n hazard_layer.name)\n\n # Check metadata\n assert_bounding_box_matches(hazard_layer, hazard_filename)\n haz_bbox_string = get_bounding_box_string(hazard_filename)\n check_layer(hazard_layer, full=True)\n\n # Run calculation\n c = Client()\n rv = c.post('/impact/api/calculate/', data=dict(\n hazard_server=INTERNAL_SERVER_URL,\n hazard=hazard_name,\n exposure_server=INTERNAL_SERVER_URL,\n exposure=exposure_name,\n #bbox=viewport_bbox_string,\n bbox=exp_bbox_string, # This one reproduced the\n # crash for lembang\n impact_function='EarthquakeFatalityFunction',\n keywords='test,shakemap,usgs'))\n\n self.assertEqual(rv.status_code, 200)\n self.assertEqual(rv['Content-Type'], 'application/json')\n data = json.loads(rv.content)\n if 'errors' in data:\n errors = data['errors']\n if errors is not None:\n msg = ('The server returned the error message: %s'\n % str(errors))\n raise Exception(msg)\n\n assert 'success' in data\n assert 'hazard_layer' in data\n assert 'exposure_layer' in data\n assert 'run_duration' in data\n assert 'run_date' in data\n assert 'layer' in data\n\n assert data['success']\n\n # Download result and check\n layer_name = data['layer'].split('/')[-1]\n\n result_layer = download(INTERNAL_SERVER_URL,\n layer_name,\n get_bounding_box_string(hazard_filename))\n assert os.path.exists(result_layer.filename)\n\n def test_jakarta_flood_study(self):\n \"\"\"HKV Jakarta flood study calculated correctly using the API\n \"\"\"\n\n # FIXME (Ole): Redo with population as shapefile later\n\n # Expected values from HKV\n expected_values = [2485442, 1537920]\n\n # Name files for hazard level, exposure and expected fatalities\n population = 'Population_Jakarta_geographic'\n plugin_name = 'FloodImpactFunction'\n\n # Upload exposure data for this test\n exposure_filename = '%s/%s.asc' % (TESTDATA, population)\n exposure_layer = save_to_geonode(exposure_filename,\n user=self.user, overwrite=True)\n\n workspace = exposure_layer.workspace\n msg = 'Expected workspace to be \"geonode\". Got %s' % workspace\n assert workspace == 'geonode'\n\n layer_name = exposure_layer.name\n msg = 'Expected layer name to be \"%s\". 
Got %s' % (population,\n layer_name)\n assert layer_name.lower() == population.lower(), msg\n\n exposure_name = '%s:%s' % (workspace, layer_name)\n\n # Check metadata\n assert_bounding_box_matches(exposure_layer, exposure_filename)\n exp_bbox_string = get_bounding_box_string(exposure_filename)\n check_layer(exposure_layer, full=True)\n\n # Now we know that exposure layer is good, lets upload some\n # hazard layers and do the calculations\n\n i = 0\n for filename in ['Flood_Current_Depth_Jakarta_geographic.asc',\n 'Flood_Design_Depth_Jakarta_geographic.asc']:\n\n hazard_filename = os.path.join(TESTDATA, filename)\n exposure_filename = os.path.join(TESTDATA, population)\n\n # Save\n hazard_filename = '%s/%s' % (TESTDATA, filename)\n hazard_layer = save_to_geonode(hazard_filename,\n user=self.user, overwrite=True)\n hazard_name = '%s:%s' % (hazard_layer.workspace,\n hazard_layer.name)\n\n # Check metadata\n assert_bounding_box_matches(hazard_layer, hazard_filename)\n haz_bbox_string = get_bounding_box_string(hazard_filename)\n check_layer(hazard_layer, full=True)\n\n # Run calculation\n c = Client()\n rv = c.post('/impact/api/calculate/', data=dict(\n hazard_server=INTERNAL_SERVER_URL,\n hazard=hazard_name,\n exposure_server=INTERNAL_SERVER_URL,\n exposure=exposure_name,\n bbox=exp_bbox_string,\n impact_function=plugin_name,\n keywords='test,flood,HKV'))\n\n self.assertEqual(rv.status_code, 200)\n self.assertEqual(rv['Content-Type'], 'application/json')\n data = json.loads(rv.content)\n if 'errors' in data:\n errors = data['errors']\n if errors is not None:\n raise Exception(errors)\n\n assert 'hazard_layer' in data\n assert 'exposure_layer' in data\n assert 'run_duration' in data\n assert 'run_date' in data\n assert 'layer' in data\n\n # Do calculation manually and check result\n hazard_raster = read_layer(hazard_filename)\n H = hazard_raster.get_data(nan=0)\n\n exposure_raster = read_layer(exposure_filename + '.asc')\n P = exposure_raster.get_data(nan=0)\n\n # Calculate impact manually\n pixel_area = 2500\n I = numpy.where(H > 0.1, P, 0) / 100000.0 * pixel_area\n\n # Verify correctness against results from HKV\n res = sum(I.flat)\n ref = expected_values[i]\n #print filename, 'Result=%f' % res, ' Expected=%f' % ref\n #print 'Pct relative error=%f' % (abs(res-ref)*100./ref)\n\n msg = 'Got result %f but expected %f' % (res, ref)\n assert numpy.allclose(res, ref, rtol=1.0e-2), msg\n\n # Verify correctness of result\n # Download result and check\n layer_name = data['layer'].split('/')[-1]\n\n result_layer = download(INTERNAL_SERVER_URL,\n layer_name,\n get_bounding_box_string(hazard_filename))\n assert os.path.exists(result_layer.filename)\n\n calculated_raster = read_layer(result_layer.filename)\n C = calculated_raster.get_data(nan=0)\n\n # FIXME (Ole): Bring this back\n # Check caption\n #caption = calculated_raster.get_caption()\n #print\n #print caption\n #expct = 'people'\n #msg = ('Caption %s did not contain expected '\n # 'keyword %s' % (caption, expct))\n #assert expct in caption, msg\n\n # Compare shape and extrema\n msg = ('Shape of calculated raster differs from reference raster: '\n 'C=%s, I=%s' % (C.shape, I.shape))\n assert numpy.allclose(C.shape, I.shape,\n rtol=1e-12, atol=1e-12), msg\n\n msg = ('Minimum of calculated raster differs from reference '\n 'raster: '\n 'C=%s, I=%s' % (numpy.nanmin(C), numpy.nanmin(I)))\n assert numpy.allclose(numpy.nanmin(C), numpy.nanmin(I),\n rtol=1e-6, atol=1e-12), msg\n msg = ('Maximum of calculated raster differs from reference '\n 'raster: '\n 
'C=%s, I=%s' % (numpy.nanmax(C), numpy.nanmax(I)))\n assert numpy.allclose(numpy.nanmax(C), numpy.nanmax(I),\n rtol=1e-6, atol=1e-12), msg\n\n # Compare every single value numerically (a bit loose -\n # probably due to single precision conversions when\n # data flows through geonode)\n #\n # FIXME: Not working - but since this test is about\n # issue #162 we'll leave it for now. TODO with NAN\n # Manually verified that the two expected values are correct,\n # though.\n #msg = 'Array values of written raster array were not as expected'\n #print C\n #print I\n #print numpy.amax(numpy.abs(C-I))\n #assert numpy.allclose(C, I, rtol=1e-2, atol=1e-5), msg\n\n # Check that extrema are in range\n xmin, xmax = calculated_raster.get_extrema()\n\n assert numpy.alltrue(C[-numpy.isnan(C)] >= xmin), msg\n assert numpy.alltrue(C[-numpy.isnan(C)] <= xmax)\n assert numpy.alltrue(C[-numpy.isnan(C)] >= 0)\n\n i += 1\n\n def test_metadata_available_after_upload(self):\n \"\"\"Test metadata is available after upload\n \"\"\"\n # Upload exposure data for this test\n name = 'Population_2010'\n exposure_filename = '%s/%s.asc' % (TESTDATA, name)\n exposure_layer = save_to_geonode(exposure_filename,\n user=self.user, overwrite=True)\n layer_name = exposure_layer.typename\n server_url = settings.GEOSERVER_BASE_URL + '/ows'\n wcs = WebCoverageService(server_url, version='1.0.0')\n layer_appears_immediately = layer_name in wcs.contents\n\n wait_time = 0.5\n import time\n time.sleep(wait_time)\n\n wcs2 = WebCoverageService(server_url, version='1.0.0')\n layer_appears_afterwards = layer_name in wcs2.contents\n\n msg = ('Layer %s was not found after %s seconds in WxS contents '\n 'on server %s.\\n'\n 'WCS contents: %s\\n' % (layer_name,\n wait_time,\n server_url,\n wcs.contents))\n\n assert layer_appears_afterwards, msg\n\n msg = ('Layer %s was not found in WxS contents on server %s.\\n'\n 'WCS contents: %s\\n' % (layer_name, server_url, wcs.contents))\n\n assert layer_appears_immediately, msg\n\n def test_lembang_building_examples(self):\n \"\"\"Lembang building impact calculation works through the API\n \"\"\"\n\n # Test for a range of hazard layers\n\n for mmi_filename in ['lembang_mmi_hazmap.asc']:\n #'Lembang_Earthquake_Scenario.asc']:\n\n # Upload input data\n hazardfile = os.path.join(TESTDATA, mmi_filename)\n hazard_layer = save_to_geonode(hazardfile, user=self.user)\n hazard_name = '%s:%s' % (hazard_layer.workspace, hazard_layer.name)\n\n exposurefile = os.path.join(TESTDATA, 'lembang_schools.shp')\n exposure_layer = save_to_geonode(exposurefile, user=self.user)\n exposure_name = '%s:%s' % (exposure_layer.workspace,\n exposure_layer.name)\n\n # Call calculation routine\n\n # FIXME (Ole): The system freaks out if there are spaces in\n # bbox string. 
Please let us catch that and deal\n # nicely with it - also do this in download()\n bbox = '105.592,-7.809,110.159,-5.647'\n\n #print\n #print get_bounding_box(hazardfile)\n #print get_bounding_box(exposurefile)\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n\n c = Client()\n rv = c.post('/impact/api/calculate/', data=dict(\n hazard_server=INTERNAL_SERVER_URL,\n hazard=hazard_name,\n exposure_server=INTERNAL_SERVER_URL,\n exposure=exposure_name,\n bbox=bbox,\n impact_function='Earthquake Building Damage Function',\n keywords='test,schools,lembang',\n ))\n\n self.assertEqual(rv.status_code, 200)\n self.assertEqual(rv['Content-Type'], 'application/json')\n data = json.loads(rv.content)\n assert 'hazard_layer' in data.keys()\n assert 'exposure_layer' in data.keys()\n assert 'run_duration' in data.keys()\n assert 'run_date' in data.keys()\n assert 'layer' in data.keys()\n\n # Download result and check\n layer_name = data['layer'].split('/')[-1]\n\n result_layer = download(INTERNAL_SERVER_URL,\n layer_name,\n bbox)\n assert os.path.exists(result_layer.filename)\n\n # Read hazard data for reference\n hazard_raster = read_layer(hazardfile)\n A = hazard_raster.get_data()\n mmi_min, mmi_max = hazard_raster.get_extrema()\n\n # Read calculated result\n impact_vector = read_layer(result_layer.filename)\n coordinates = impact_vector.get_geometry()\n attributes = impact_vector.get_data()\n\n # Verify calculated result\n count = 0\n for i in range(len(attributes)):\n lon, lat = coordinates[i][:]\n calculated_mmi = attributes[i]['MMI']\n\n if calculated_mmi == 0.0:\n # FIXME (Ole): Some points have MMI==0 here.\n # Weird but not a show stopper\n continue\n\n # Check that interpolated points are within range\n msg = ('Interpolated mmi %f was outside extrema: '\n '[%f, %f] at location '\n '[%f, %f]. 
' % (calculated_mmi,\n mmi_min, mmi_max,\n lon, lat))\n assert mmi_min <= calculated_mmi <= mmi_max, msg\n\n # Check calculated damage\n calculated_dam = attributes[i]['DAMAGE']\n\n ref_dam = lembang_damage_function(calculated_mmi)\n msg = ('Calculated damage was not as expected '\n 'for hazard layer %s' % hazardfile)\n assert numpy.allclose(calculated_dam, ref_dam,\n rtol=1.0e-12), msg\n\n count += 1\n\n # Make only a few points were 0\n assert count > len(attributes) - 4\n\n # FIXME (Ole): Do as part of issue #74\n def XXtest_shakemap_population_exposure(self):\n \"\"\"Population exposed to groundshaking matches USGS numbers\n \"\"\"\n\n hazardfile = os.path.join(TESTDATA, 'shakemap_sumatra_20110129.tif')\n hazard_layer = save_to_geonode(hazardfile, overwrite=True,\n user=self.user)\n hazard_name = '%s:%s' % (hazard_layer.workspace, hazard_layer.name)\n\n exposurefile = os.path.join(TESTDATA, 'population_indonesia_2008.tif')\n exposure_layer = save_to_geonode(exposurefile, overwrite=True,\n user=self.user)\n exposure_name = '%s:%s' % (exposure_layer.workspace,\n exposure_layer.name)\n\n #with warnings.catch_warnings():\n # warnings.simplefilter('ignore')\n c = Client()\n rv = c.post('/impact/api/calculate/', data=dict(\n hazard_server=INTERNAL_SERVER_URL,\n hazard=hazard_name,\n exposure_server=INTERNAL_SERVER_URL,\n exposure=exposure_name,\n bbox=get_bounding_box_string(hazardfile),\n impact_function='USGSFatalityFunction',\n keywords='test,shakemap,usgs'))\n\n self.assertEqual(rv.status_code, 200)\n self.assertEqual(rv['Content-Type'], 'application/json')\n data = json.loads(rv.content)\n assert 'hazard_layer' in data.keys()\n assert 'exposure_layer' in data.keys()\n assert 'run_duration' in data.keys()\n assert 'run_date' in data.keys()\n assert 'layer' in data.keys()\n\n # Download result and check\n layer_name = data['layer'].split('/')[-1]\n\n result_layer = download(INTERNAL_SERVER_URL,\n layer_name,\n get_bounding_box(hazardfile))\n assert os.path.exists(result_layer.filename)\n\n # Read hazard data for reference\n hazard_raster = read_layer(hazardfile)\n H = hazard_raster.get_data()\n mmi_min, mmi_max = hazard_raster.get_extrema()\n\n # Read calculated result\n impact_raster = read_layer(result_layer.filename)\n I = impact_raster.get_data()\n\n # FIXME (Ole): Not finished\n\n def test_exceptions_in_calculate_endpoint(self):\n \"\"\"Wrong bbox input is handled nicely by /impact/api/calculate/\n \"\"\"\n\n # Upload input data\n hazardfile = os.path.join(TESTDATA, 'lembang_mmi_hazmap.asc')\n hazard_layer = save_to_geonode(hazardfile, user=self.user)\n hazard_name = '%s:%s' % (hazard_layer.workspace, hazard_layer.name)\n\n exposurefile = os.path.join(TESTDATA, 'lembang_schools.shp')\n exposure_layer = save_to_geonode(exposurefile, user=self.user)\n exposure_name = '%s:%s' % (exposure_layer.workspace,\n exposure_layer.name)\n\n bbox_correct = '105.592,-7.809,110.159,-5.647'\n bbox_with_spaces = '105.592, -7.809, 110.159, -5.647'\n bbox_non_numeric = '105.592,-7.809,x,-5.647'\n bbox_list = [1, 2, 3, 4]\n bbox_list_non_numeric = [1, '2', 3, 4]\n bbox_none = None\n bbox_wrong_number1 = '105.592,-7.809,-5.647'\n bbox_wrong_number2 = '105.592,-7.809,-5.647,34,123'\n bbox_empty = ''\n bbox_inconsistent1 = '110,-7.809,105,-5.647'\n bbox_inconsistent2 = '105.592,-5,110.159,-7'\n bbox_out_of_bound1 = '-185.592,-7.809,110.159,-5.647'\n bbox_out_of_bound2 = '105.592,-97.809,110.159,-5.647'\n bbox_out_of_bound3 = '105.592,-7.809,189.159,-5.647'\n bbox_out_of_bound4 = 
'105.592,-7.809,110.159,-105.647'\n\n data = dict(hazard_server=INTERNAL_SERVER_URL,\n hazard=hazard_name,\n exposure_server=INTERNAL_SERVER_URL,\n exposure=exposure_name,\n bbox=bbox_correct,\n impact_function='Earthquake Building Damage Function',\n keywords='test,schools,lembang')\n\n # First do it correctly (twice)\n c = Client()\n rv = c.post('/impact/api/calculate/', data=data)\n rv = c.post('/impact/api/calculate/', data=data)\n\n # Then check that spaces are dealt with correctly\n data['bbox'] = bbox_with_spaces\n rv = c.post('/impact/api/calculate/', data=data)\n\n # Then with a range of wrong bbox inputs\n for bad_bbox in [bbox_list,\n bbox_none,\n bbox_empty,\n bbox_non_numeric,\n bbox_list_non_numeric,\n bbox_wrong_number1,\n bbox_wrong_number2,\n bbox_inconsistent1,\n bbox_inconsistent2,\n bbox_out_of_bound1,\n bbox_out_of_bound2,\n bbox_out_of_bound3,\n bbox_out_of_bound4]:\n\n # Use erroneous bounding box\n data['bbox'] = bad_bbox\n\n # FIXME (Ole): Suppress error output from c.post\n rv = c.post('/impact/api/calculate/', data=data)\n self.assertEqual(rv.status_code, 200)\n self.assertEqual(rv['Content-Type'], 'application/json')\n data_out = json.loads(rv.content)\n\n msg = ('Bad bounding box %s should have raised '\n 'an error' % bad_bbox)\n assert 'errors' in data_out, msg\n\n def test_geotransform_from_geonode(self):\n \"\"\"Geotransforms of GeoNode layers can be correctly determined\n \"\"\"\n\n for filename in ['lembang_mmi_hazmap.asc',\n 'test_grid.asc']:\n\n # Upload file to GeoNode\n f = os.path.join(TESTDATA, filename)\n layer = save_to_geonode(f, user=self.user)\n\n # Read raster file and obtain reference resolution\n R = read_layer(f)\n ref_geotransform = R.get_geotransform()\n\n # Get geotransform from GeoNode\n layer_name = layer.typename\n metadata = get_metadata(INTERNAL_SERVER_URL, layer_name)\n\n geotransform_name = 'geotransform'\n msg = ('Could not find attribute \"%s\" in metadata. '\n 'Values are: %s' % (geotransform_name, metadata.keys()))\n assert geotransform_name in metadata, msg\n\n gn_geotransform = metadata[geotransform_name]\n msg = ('Geotransform obtained from GeoNode for layer %s '\n 'was not correct. I got %s but expected %s'\n '' % (layer_name, gn_geotransform, ref_geotransform))\n assert numpy.allclose(ref_geotransform, gn_geotransform), msg\n\n def test_data_resampling_example(self):\n \"\"\"Raster data is unchanged when going through geonode\n\n \"\"\"\n\n # Name file names for hazard level, exposure and expected fatalities\n hazard_filename = ('%s/maumere_aos_depth_20m_land_wgs84.asc'\n % TESTDATA)\n exposure_filename = ('%s/maumere_pop_prj.shp' % TESTDATA)\n\n #------------\n # Hazard data\n #------------\n # Read hazard input data for reference\n H_ref = read_layer(hazard_filename)\n\n A_ref = H_ref.get_data()\n depth_min_ref, depth_max_ref = H_ref.get_extrema()\n\n # Upload to internal geonode\n hazard_layer = save_to_geonode(hazard_filename, user=self.user)\n hazard_name = '%s:%s' % (hazard_layer.workspace, hazard_layer.name)\n\n # Download data again\n bbox = get_bounding_box_string(hazard_filename) # The biggest\n H = download(INTERNAL_SERVER_URL, hazard_name, bbox)\n\n A = H.get_data()\n depth_min, depth_max = H.get_extrema()\n\n # FIXME (Ole): The layer read from file is single precision only:\n # Issue #17\n # Here's the explanation why interpolation below produce slightly\n # different results (but why?)\n # The layer read from file is single precision which may be due to\n # the way it is converted from ASC to TIF. 
In other words the\n # problem may be in raster.write_to_file. Float64 is\n # specified there, so this is a mystery.\n #print 'A', A.dtype # Double precision\n #print 'A_ref', A_ref.dtype # Single precision\n\n # Compare extrema to values from numpy array\n assert numpy.allclose(depth_max, numpy.nanmax(A),\n rtol=1.0e-12, atol=1.0e-12)\n\n assert numpy.allclose(depth_max_ref, numpy.nanmax(A_ref),\n rtol=1.0e-12, atol=1.0e-12)\n\n # Compare to reference\n assert numpy.allclose([depth_min, depth_max],\n [depth_min_ref, depth_max_ref],\n rtol=1.0e-12, atol=1.0e-12)\n\n # Compare extrema to values read off QGIS for this layer\n assert numpy.allclose([depth_min, depth_max], [0.0, 16.68],\n rtol=1.0e-6, atol=1.0e-10)\n\n # Investigate difference visually\n #from matplotlib.pyplot import matshow, show\n #matshow(A)\n #matshow(A_ref)\n #matshow(A - A_ref)\n #show()\n\n #print\n for i in range(A.shape[0]):\n for j in range(A.shape[1]):\n if not numpy.isnan(A[i, j]):\n err = abs(A[i, j] - A_ref[i, j])\n if err > 0:\n msg = ('%i, %i: %.15f, %.15f, %.15f'\n % (i, j, A[i, j], A_ref[i, j], err))\n raise Exception(msg)\n #if A[i,j] > 16:\n # print i, j, A[i, j], A_ref[i, j]\n\n # Compare elements (nan & numbers)\n id_nan = numpy.isnan(A)\n id_nan_ref = numpy.isnan(A_ref)\n assert numpy.all(id_nan == id_nan_ref)\n assert numpy.allclose(A[-id_nan], A_ref[-id_nan],\n rtol=1.0e-15, atol=1.0e-15)\n\n #print 'MAX', A[245, 283], A_ref[245, 283]\n #print 'MAX: %.15f %.15f %.15f' %(A[245, 283], A_ref[245, 283])\n assert numpy.allclose(A[245, 283], A_ref[245, 283],\n rtol=1.0e-15, atol=1.0e-15)\n\n #--------------\n # Exposure data\n #--------------\n # Read exposure input data for reference\n E_ref = read_layer(exposure_filename)\n\n # Upload to internal geonode\n exposure_layer = save_to_geonode(exposure_filename, user=self.user)\n exposure_name = '%s:%s' % (exposure_layer.workspace,\n exposure_layer.name)\n\n # Download data again\n E = download(INTERNAL_SERVER_URL, exposure_name, bbox)\n\n # Check exposure data against reference\n coordinates = E.get_geometry()\n coordinates_ref = E_ref.get_geometry()\n assert numpy.allclose(coordinates, coordinates_ref,\n rtol=1.0e-12, atol=1.0e-12)\n\n attributes = E.get_data()\n attributes_ref = E_ref.get_data()\n for i, att in enumerate(attributes):\n att_ref = attributes_ref[i]\n for key in att:\n assert att[key] == att_ref[key]\n\n # Test riab's interpolation function\n I = H.interpolate(E, name='depth')\n icoordinates = I.get_geometry()\n\n I_ref = H_ref.interpolate(E_ref, name='depth')\n icoordinates_ref = I_ref.get_geometry()\n\n assert numpy.allclose(coordinates,\n icoordinates,\n rtol=1.0e-12, atol=1.0e-12)\n assert numpy.allclose(coordinates,\n icoordinates_ref,\n rtol=1.0e-12, atol=1.0e-12)\n\n iattributes = I.get_data()\n assert numpy.allclose(icoordinates, coordinates)\n\n N = len(icoordinates)\n assert N == 891\n\n # Set tolerance for single precision until issue #17 has been fixed\n # It appears that the single precision leads to larger interpolation\n # errors\n rtol_issue17 = 2.0e-3\n atol_issue17 = 1.0e-4\n\n # Verify interpolated values with test result\n for i in range(N):\n\n interpolated_depth_ref = I_ref.get_data()[i]['depth']\n interpolated_depth = iattributes[i]['depth']\n\n assert nanallclose(interpolated_depth,\n interpolated_depth_ref,\n rtol=rtol_issue17, atol=atol_issue17)\n\n pointid = attributes[i]['POINTID']\n\n if pointid == 263:\n\n #print i, pointid, attributes[i],\n #print interpolated_depth, coordinates[i]\n\n # Check that location is 
correct\n assert numpy.allclose(coordinates[i],\n [122.20367299, -8.61300358],\n rtol=1.0e-7, atol=1.0e-12)\n\n # This is known to be outside inundation area so should\n # near zero\n assert numpy.allclose(interpolated_depth, 0.0,\n rtol=1.0e-12, atol=1.0e-12)\n\n if pointid == 148:\n # Check that location is correct\n #print coordinates[i]\n assert numpy.allclose(coordinates[i],\n [122.2045912, -8.608483265],\n rtol=1.0e-7, atol=1.0e-12)\n\n # This is in an inundated area with a surrounding depths of\n # 4.531, 3.911\n # 2.675, 2.583\n assert interpolated_depth < 4.531\n assert interpolated_depth < 3.911\n assert interpolated_depth > 2.583\n assert interpolated_depth > 2.675\n\n #print interpolated_depth\n # This is a characterisation test for bilinear interpolation\n assert numpy.allclose(interpolated_depth, 3.62477215491,\n rtol=rtol_issue17, atol=1.0e-12)\n\n # Check that interpolated points are within range\n msg = ('Interpolated depth %f at point %i was outside extrema: '\n '[%f, %f]. ' % (interpolated_depth, i,\n depth_min, depth_max))\n\n if not numpy.isnan(interpolated_depth):\n assert depth_min <= interpolated_depth <= depth_max, msg\n\n def test_earthquake_exposure_plugin(self):\n \"\"\"Population exposure to individual MMI levels can be computed\n \"\"\"\n\n # Upload exposure data for this test\n # FIXME (Ole): While this dataset is ok for testing,\n # note that is has been resampled without scaling\n # so numbers are about 25 times too large.\n # Consider replacing test populations dataset for good measures,\n # just in case any one accidentally started using this dataset\n # for real.\n\n name = 'Population_2010'\n exposure_filename = '%s/%s.asc' % (TESTDATA, name)\n exposure_layer = save_to_geonode(exposure_filename,\n user=self.user, overwrite=True)\n exposure_name = '%s:%s' % (exposure_layer.workspace,\n exposure_layer.name)\n\n # Check metadata\n assert_bounding_box_matches(exposure_layer, exposure_filename)\n exp_bbox_string = get_bounding_box_string(exposure_filename)\n check_layer(exposure_layer, full=True)\n\n # Upload hazard data\n filename = 'Lembang_Earthquake_Scenario.asc'\n hazard_filename = '%s/%s' % (TESTDATA, filename)\n hazard_layer = save_to_geonode(hazard_filename,\n user=self.user, overwrite=True)\n hazard_name = '%s:%s' % (hazard_layer.workspace,\n hazard_layer.name)\n\n # Check metadata\n assert_bounding_box_matches(hazard_layer, hazard_filename)\n haz_bbox_string = get_bounding_box_string(hazard_filename)\n check_layer(hazard_layer, full=True)\n\n # Run calculation\n c = Client()\n rv = c.post('/impact/api/calculate/', data=dict(\n hazard_server=INTERNAL_SERVER_URL,\n hazard=hazard_name,\n exposure_server=INTERNAL_SERVER_URL,\n exposure=exposure_name,\n bbox=haz_bbox_string,\n impact_function='EarthquakePopulationExposureFunction',\n keywords='test,population,exposure,usgs'))\n\n self.assertEqual(rv.status_code, 200)\n self.assertEqual(rv['Content-Type'], 'application/json')\n data = json.loads(rv.content)\n if 'errors' in data:\n errors = data['errors']\n if errors is not None:\n msg = ('The server returned the error message: %s'\n % str(errors))\n raise Exception(msg)\n\n assert 'success' in data\n assert 'hazard_layer' in data\n assert 'exposure_layer' in data\n assert 'run_duration' in data\n assert 'run_date' in data\n assert 'layer' in data\n\n assert data['success']\n\n # Download result and check\n layer_name = data['layer'].split('/')[-1]\n\n result_layer = download(INTERNAL_SERVER_URL,\n layer_name,\n 
get_bounding_box_string(hazard_filename))\n assert os.path.exists(result_layer.filename)\n\n # Check calculated values\n keywords = result_layer.get_keywords()\n\n assert 'mmi-classes' in keywords\n assert 'affected-population' in keywords\n\n mmi_classes = [int(x) for x in keywords['mmi-classes'].split('_')]\n count = [float(x) for x in keywords['affected-population'].split('_')]\n\n # Brute force count for each population level\n population = download(INTERNAL_SERVER_URL,\n exposure_name,\n get_bounding_box_string(hazard_filename))\n intensity = download(INTERNAL_SERVER_URL,\n hazard_name,\n get_bounding_box_string(hazard_filename))\n\n # Extract data\n H = intensity.get_data(nan=0)\n P = population.get_data(nan=0)\n\n brutecount = {}\n for mmi in mmi_classes:\n brutecount[mmi] = 0\n\n for i in range(P.shape[0]):\n for j in range(P.shape[1]):\n mmi = H[i, j]\n if not numpy.isnan(mmi):\n mmi_class = int(round(mmi))\n\n pop = P[i, j]\n if not numpy.isnan(pop):\n brutecount[mmi_class] += pop\n\n for i, mmi in enumerate(mmi_classes):\n assert numpy.allclose(count[i], brutecount[mmi], rtol=1.0e-6)\n\n def test_linked_datasets(self):\n \"\"\"Linked datesets can be pulled in e.g. to include gender break down\n \"\"\"\n\n # Upload exposure data for this test. This will automatically\n # pull in female_pct_yogya.asc through its \"associates\" keyword\n name = 'population_yogya'\n exposure_filename = '%s/%s.asc' % (TESTDATA, name)\n exposure_layer = save_to_geonode(exposure_filename,\n user=self.user, overwrite=True)\n exposure_name = '%s:%s' % (exposure_layer.workspace,\n exposure_layer.name)\n\n # Check metadata\n assert_bounding_box_matches(exposure_layer, exposure_filename)\n exp_bbox_string = get_bounding_box_string(exposure_filename)\n check_layer(exposure_layer, full=True)\n\n # Upload hazard data\n filename = 'eq_yogya_2006.asc'\n hazard_filename = '%s/%s' % (TESTDATA, filename)\n hazard_layer = save_to_geonode(hazard_filename,\n user=self.user, overwrite=True)\n hazard_name = '%s:%s' % (hazard_layer.workspace,\n hazard_layer.name)\n\n # Check metadata\n assert_bounding_box_matches(hazard_layer, hazard_filename)\n haz_bbox_string = get_bounding_box_string(hazard_filename)\n check_layer(hazard_layer, full=True)\n\n # Run calculation\n c = Client()\n rv = c.post('/impact/api/calculate/', data=dict(\n hazard_server=INTERNAL_SERVER_URL,\n hazard=hazard_name,\n exposure_server=INTERNAL_SERVER_URL,\n exposure=exposure_name,\n bbox=haz_bbox_string,\n impact_function='EarthquakeFatalityFunction',\n keywords='test,fatalities,population,usgs'))\n\n self.assertEqual(rv.status_code, 200)\n self.assertEqual(rv['Content-Type'], 'application/json')\n data = json.loads(rv.content)\n if 'errors' in data:\n errors = data['errors']\n if errors is not None:\n msg = ('The server returned the error message: %s'\n % str(errors))\n raise Exception(msg)\n\n assert 'success' in data\n assert 'hazard_layer' in data\n assert 'exposure_layer' in data\n assert 'run_duration' in data\n assert 'run_date' in data\n assert 'layer' in data\n\n assert data['success']\n\n # Download result and check\n layer_name = data['layer'].split('/')[-1]\n\n result_layer = download(INTERNAL_SERVER_URL,\n layer_name,\n get_bounding_box_string(hazard_filename))\n assert os.path.exists(result_layer.filename)\n\n # Check calculated values\n keywords = result_layer.get_keywords()\n\n assert 'caption' in keywords\n\n # Parse caption and look for the correct numbers\n\n\nif __name__ == '__main__':\n os.environ['DJANGO_SETTINGS_MODULE'] = 
'risiko.settings'\n suite = unittest.makeSuite(Test_calculations, 'test')\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite)\n" }, { "alpha_fraction": 0.6174402236938477, "alphanum_fraction": 0.6211907863616943, "avg_line_length": 47.45454406738281, "blob_id": "50b362547d4d5a2890cc73ee32996bbbbe14c1c4", "content_id": "c619451de4a92ddd650644d241b87a6a7fbe68ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2133, "license_type": "no_license", "max_line_length": 78, "num_lines": 44, "path": "/risiko/urls.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "from django.conf.urls.defaults import *\nfrom django.conf import settings\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom django.contrib import admin\n\nadmin.autodiscover()\n\njs_info_dict = {\n 'packages': ('geonode.maps',),\n}\n\nurlpatterns = patterns('',\n url(r'^$', 'geonode.views.index', name='index'),\n (r'^(?P<page>help)/?$', 'geonode.views.static'),\n (r'^developer/?$', 'geonode.views.developer'),\n (r'^lang\\.js$', 'django.views.generic.simple.direct_to_template',\n {'template': 'lang_risiko.js', 'mimetype': 'text/javascript'}, 'lang'),\n (r'^maps/', include('geonode.maps.urls')),\n (r'^proxy/', 'geonode.proxy.views.proxy'),\n (r'^geoserver/', 'geonode.proxy.views.geoserver'),\n url(r'^data/$', 'geonode.maps.views.browse_data', name='data'),\n url(r'^data/acls/?$', 'geonode.maps.views.layer_acls', name='layer_acls'),\n url(r'^data/api/batch_permissions/?$',\n 'geonode.maps.views.batch_permissions'),\n url(r'^data/api/batch_delete/?$', 'geonode.maps.views.batch_delete'),\n url(r'^data/upload$', 'geonode.maps.views.upload_layer',\n name='data_upload'),\n (r'^data/download$', 'geonode.maps.views.batch_layer_download'),\n (r'^data/(?P<layername>[^/]*)$', 'geonode.maps.views.layerController'),\n (r'^data/(?P<layername>[^/]*)/ajax-permissions$',\n 'geonode.maps.views.ajax_layer_permissions'),\n (r'^admin/', include(admin.site.urls)),\n (r'^i18n/', include('django.conf.urls.i18n')),\n (r'^jsi18n/$', 'django.views.i18n.javascript_catalog', js_info_dict),\n (r'^accounts/ajax_login$', 'geonode.views.ajax_login'),\n (r'^accounts/ajax_lookup$', 'geonode.views.ajax_lookup'),\n (r'^accounts/login', 'django.contrib.auth.views.login'),\n (r'^accounts/logout', 'django.contrib.auth.views.logout'),\n (r'^avatar/', include('avatar.urls')),\n (r'^accounts/', include('registration.urls')),\n (r'^profiles/', include('profiles.urls')),\n# (r'^rosetta/', include('rosetta.urls')),\n (r'^impact/', include('impact.urls')),\n ) + staticfiles_urlpatterns()\n\n" }, { "alpha_fraction": 0.5310412049293518, "alphanum_fraction": 0.5461506843566895, "avg_line_length": 38.22983932495117, "blob_id": "c259004a47ebba0da85e7bd41aa76dc20ff4641d", "content_id": "bfa1a554ca58ab8f3837559b3a33dca413b3e7d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 38916, "license_type": "no_license", "max_line_length": 79, "num_lines": 992, "path": "/impact/tests/test_engine.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "import unittest\nimport numpy\nimport sys\nimport os\n\nfrom impact.engine.core import calculate_impact\nfrom impact.engine.interpolation2d import interpolate_raster\nfrom impact.storage.io import read_layer\n\nfrom impact.storage.utilities import unique_filename\nfrom impact.storage.io import write_vector_data\nfrom impact.storage.io import write_raster_data\nfrom 
impact.plugins import get_plugins\n\nfrom impact.tests.utilities import TESTDATA\nfrom impact.tests.plugins import empirical_fatality_model\nfrom impact.tests.plugins import NEXIS_building_impact_model\n\n\ndef linear_function(x, y):\n \"\"\"Auxiliary function for use with interpolation test\n \"\"\"\n\n return x + y / 2.0\n\n\ndef lembang_damage_function(x):\n if x < 6.0:\n value = 0.0\n else:\n value = (0.692 * (x ** 4) -\n 15.82 * (x ** 3) +\n 135.0 * (x ** 2) -\n 509.0 * x +\n 714.4)\n return value\n\n\nclass Test_Engine(unittest.TestCase):\n\n def test_earthquake_fatality_estimation_allen(self):\n \"\"\"Fatalities from ground shaking can be computed correctly 1\n using aligned rasters\n \"\"\"\n\n # Name file names for hazard level, exposure and expected fatalities\n hazard_filename = '%s/Earthquake_Ground_Shaking_clip.tif' % TESTDATA\n exposure_filename = '%s/Population_2010_clip.tif' % TESTDATA\n\n # Calculate impact using API\n H = read_layer(hazard_filename)\n E = read_layer(exposure_filename)\n\n plugin_name = 'Earthquake Fatality Function'\n plugin_list = get_plugins(plugin_name)\n assert len(plugin_list) == 1\n assert plugin_list[0].keys()[0] == plugin_name\n\n IF = plugin_list[0][plugin_name]\n\n # Call calculation engine\n impact_filename = calculate_impact(layers=[H, E],\n impact_fcn=IF)\n\n # Do calculation manually and check result\n hazard_raster = read_layer(hazard_filename)\n H = hazard_raster.get_data(nan=0)\n\n exposure_raster = read_layer(exposure_filename)\n E = exposure_raster.get_data(nan=0)\n\n # Calculate impact manually\n a = 0.97429\n b = 11.037\n F = 10 ** (a * H - b) * E\n\n # Verify correctness of result\n calculated_raster = read_layer(impact_filename)\n C = calculated_raster.get_data(nan=0)\n\n # Compare shape and extrema\n msg = ('Shape of calculated raster differs from reference raster: '\n 'C=%s, F=%s' % (C.shape, F.shape))\n assert numpy.allclose(C.shape, F.shape, rtol=1e-12, atol=1e-12), msg\n\n msg = ('Minimum of calculated raster differs from reference raster: '\n 'C=%s, F=%s' % (numpy.min(C), numpy.min(F)))\n assert numpy.allclose(numpy.min(C), numpy.min(F),\n rtol=1e-12, atol=1e-12), msg\n msg = ('Maximum of calculated raster differs from reference raster: '\n 'C=%s, F=%s' % (numpy.max(C), numpy.max(F)))\n assert numpy.allclose(numpy.max(C), numpy.max(F),\n rtol=1e-12, atol=1e-12), msg\n\n # Compare every single value numerically\n msg = 'Array values of written raster array were not as expected'\n assert numpy.allclose(C, F, rtol=1e-12, atol=1e-12), msg\n\n # Check that extrema are in range\n xmin, xmax = calculated_raster.get_extrema()\n assert numpy.alltrue(C >= xmin)\n assert numpy.alltrue(C <= xmax)\n assert numpy.alltrue(C >= 0)\n\n def test_earthquake_fatality_estimation_ghasemi(self):\n \"\"\"Fatalities from ground shaking can be computed correctly 2\n using the Hadi Ghasemi function.\n \"\"\"\n\n # Name file names for hazard level, exposure and expected fatalities\n hazard_filename = '%s/Earthquake_Ground_Shaking_clip.tif' % TESTDATA\n exposure_filename = '%s/Population_2010_clip.tif' % TESTDATA\n\n # Calculate impact using API\n H = read_layer(hazard_filename)\n E = read_layer(exposure_filename)\n\n plugin_name = 'Empirical Fatality Function'\n plugin_list = get_plugins(plugin_name)\n assert len(plugin_list) == 1\n assert plugin_list[0].keys()[0] == plugin_name\n\n IF = plugin_list[0][plugin_name]\n\n # Call calculation engine\n impact_filename = calculate_impact(layers=[H, E],\n impact_fcn=IF)\n\n # Do calculation manually and check 
result\n hazard_raster = read_layer(hazard_filename)\n H = hazard_raster.get_data(nan=0)\n\n exposure_raster = read_layer(exposure_filename)\n E = exposure_raster.get_data(nan=0)\n\n # Verify correctness of result\n calculated_raster = read_layer(impact_filename)\n C = calculated_raster.get_data(nan=0)\n\n # Calculate impact manually\n # FIXME (Ole): Jono will do this\n return\n\n # Compare shape and extrema\n msg = ('Shape of calculated raster differs from reference raster: '\n 'C=%s, F=%s' % (C.shape, F.shape))\n assert numpy.allclose(C.shape, F.shape, rtol=1e-12, atol=1e-12), msg\n\n msg = ('Minimum of calculated raster differs from reference raster: '\n 'C=%s, F=%s' % (numpy.min(C), numpy.min(F)))\n assert numpy.allclose(numpy.min(C), numpy.min(F),\n rtol=1e-12, atol=1e-12), msg\n msg = ('Maximum of calculated raster differs from reference raster: '\n 'C=%s, F=%s' % (numpy.max(C), numpy.max(F)))\n assert numpy.allclose(numpy.max(C), numpy.max(F),\n rtol=1e-12, atol=1e-12), msg\n\n # Compare every single value numerically\n msg = 'Array values of written raster array were not as expected'\n assert numpy.allclose(C, F, rtol=1e-12, atol=1e-12), msg\n\n # Check that extrema are in range\n xmin, xmax = calculated_raster.get_extrema()\n assert numpy.alltrue(C >= xmin)\n assert numpy.alltrue(C <= xmax)\n assert numpy.alltrue(C >= 0)\n\n def test_jakarta_flood_study(self):\n \"\"\"HKV Jakarta flood study calculated correctly using aligned rasters\n \"\"\"\n\n # FIXME (Ole): Redo with population as shapefile later\n\n # Name file names for hazard level, exposure and expected fatalities\n\n population = 'Population_Jakarta_geographic.asc'\n plugin_name = 'Flood Impact Function'\n\n # Expected values from HKV\n expected_values = [2485442, 1537920]\n\n i = 0\n for filename in ['Flood_Current_Depth_Jakarta_geographic.asc',\n 'Flood_Design_Depth_Jakarta_geographic.asc']:\n\n hazard_filename = os.path.join(TESTDATA, filename)\n exposure_filename = os.path.join(TESTDATA, population)\n\n # Get layers using API\n H = read_layer(hazard_filename)\n E = read_layer(exposure_filename)\n\n plugin_list = get_plugins(plugin_name)\n assert len(plugin_list) == 1\n assert plugin_list[0].keys()[0] == plugin_name\n\n IF = plugin_list[0][plugin_name]\n\n # Call impact calculation engine\n impact_filename = calculate_impact(layers=[H, E],\n impact_fcn=IF)\n\n # Do calculation manually and check result\n hazard_raster = read_layer(hazard_filename)\n H = hazard_raster.get_data(nan=0)\n\n exposure_raster = read_layer(exposure_filename)\n P = exposure_raster.get_data(nan=0)\n\n # Calculate impact manually\n pixel_area = 2500\n I = numpy.where(H > 0.1, P, 0) / 100000.0 * pixel_area\n\n # Verify correctness against results from HKV\n res = sum(I.flat)\n ref = expected_values[i]\n #print filename, 'Result=%f' % res, ' Expected=%f' % ref\n #print 'Pct relative error=%f' % (abs(res-ref)*100./ref)\n\n msg = 'Got result %f but expected %f' % (res, ref)\n assert numpy.allclose(res, ref, rtol=1.0e-2), msg\n\n # Verify correctness of result\n calculated_raster = read_layer(impact_filename)\n C = calculated_raster.get_data(nan=0)\n\n # Check caption\n caption = calculated_raster.get_caption()\n expct = 'People'\n msg = ('Caption %s did not contain expected '\n 'keyword %s' % (caption, expct))\n assert expct in caption, msg\n\n # Compare shape and extrema\n msg = ('Shape of calculated raster differs from reference raster: '\n 'C=%s, I=%s' % (C.shape, I.shape))\n assert numpy.allclose(C.shape, I.shape,\n rtol=1e-12, atol=1e-12), 
msg\n\n msg = ('Minimum of calculated raster differs from reference '\n 'raster: '\n 'C=%s, I=%s' % (numpy.min(C), numpy.min(I)))\n assert numpy.allclose(numpy.min(C), numpy.min(I),\n rtol=1e-12, atol=1e-12), msg\n msg = ('Maximum of calculated raster differs from reference '\n 'raster: '\n 'C=%s, I=%s' % (numpy.max(C), numpy.max(I)))\n assert numpy.allclose(numpy.max(C), numpy.max(I),\n rtol=1e-12, atol=1e-12), msg\n\n # Compare every single value numerically\n msg = 'Array values of written raster array were not as expected'\n assert numpy.allclose(C, I, rtol=1e-12, atol=1e-12), msg\n\n # Check that extrema are in range\n xmin, xmax = calculated_raster.get_extrema()\n assert numpy.alltrue(C >= xmin)\n assert numpy.alltrue(C <= xmax)\n assert numpy.alltrue(C >= 0)\n\n i += 1\n\n def test_earthquake_damage_schools(self):\n \"\"\"Lembang building damage from ground shaking works\n\n This test also exercises interpolation of hazard level (raster) to\n building locations (vector data).\n \"\"\"\n\n for mmi_filename in ['lembang_mmi_hazmap.asc',\n 'Earthquake_Ground_Shaking_clip.tif', # NaN's\n 'Lembang_Earthquake_Scenario.asc']:\n\n # Name file names for hazard level and exposure\n hazard_filename = '%s/%s' % (TESTDATA, mmi_filename)\n exposure_filename = '%s/lembang_schools.shp' % TESTDATA\n\n # Calculate impact using API\n H = read_layer(hazard_filename)\n E = read_layer(exposure_filename)\n\n plugin_name = 'Earthquake Building Damage Function'\n plugin_list = get_plugins(plugin_name)\n assert len(plugin_list) == 1\n assert plugin_list[0].keys()[0] == plugin_name\n\n IF = plugin_list[0][plugin_name]\n\n impact_filename = calculate_impact(layers=[H, E],\n impact_fcn=IF)\n\n # Read input data\n hazard_raster = read_layer(hazard_filename)\n A = hazard_raster.get_data()\n mmi_min, mmi_max = hazard_raster.get_extrema()\n\n exposure_vector = read_layer(exposure_filename)\n coordinates = exposure_vector.get_geometry()\n attributes = exposure_vector.get_data()\n\n # Read calculated result\n impact_vector = read_layer(impact_filename)\n icoordinates = impact_vector.get_geometry()\n iattributes = impact_vector.get_data()\n\n # First check that interpolated MMI was done as expected\n fid = open('%s/lembang_schools_percentage_loss_and_mmi.txt'\n % TESTDATA)\n reference_points = []\n MMI = []\n DAM = []\n for line in fid.readlines()[1:]:\n fields = line.strip().split(',')\n\n lon = float(fields[4][1:-1])\n lat = float(fields[3][1:-1])\n mmi = float(fields[-1][1:-1])\n dam = float(fields[-2][1:-1])\n\n reference_points.append((lon, lat))\n MMI.append(mmi)\n DAM.append(dam)\n\n # Verify that coordinates are consistent\n msg = 'Interpolated coordinates do not match those of test data'\n assert numpy.allclose(icoordinates, reference_points), msg\n\n # Verify interpolated MMI with test result\n min_damage = sys.maxint\n max_damage = -min_damage\n for i in range(len(MMI)):\n lon, lat = icoordinates[i][:]\n calculated_mmi = iattributes[i]['MMI']\n\n if numpy.isnan(calculated_mmi):\n continue\n\n # Check that interpolated points are within range\n msg = ('Interpolated mmi %f from file %s was outside '\n 'extrema: [%f, %f] at location '\n '[%f, %f].' 
% (calculated_mmi, hazard_filename,\n mmi_min, mmi_max, lon, lat))\n assert mmi_min <= calculated_mmi <= mmi_max, msg\n\n # Set up some tolerances for comparison with test set.\n if mmi_filename.startswith('Lembang_Earthquake'):\n pct = 3\n else:\n pct = 2\n\n # Check that interpolated result is within specified tolerance\n msg = ('Calculated MMI %f deviated more than %.1f%% from '\n 'what was expected %f' % (calculated_mmi, pct, MMI[i]))\n assert numpy.allclose(calculated_mmi, MMI[i],\n rtol=float(pct) / 100), msg\n\n calculated_dam = iattributes[i]['DAMAGE']\n if calculated_dam > max_damage:\n max_damage = calculated_dam\n\n if calculated_dam < min_damage:\n min_damage = calculated_dam\n\n ref_dam = lembang_damage_function(calculated_mmi)\n msg = ('Calculated damage was not as expected')\n assert numpy.allclose(calculated_dam, ref_dam,\n rtol=1.0e-12), msg\n\n # Test that test data is correct by calculating damage based\n # on reference MMI.\n # FIXME (Ole): UNCOMMENT WHEN WE GET THE CORRECT DATASET\n #expected_test_damage = lembang_damage_function(MMI[i])\n #msg = ('Test data is inconsistent: i = %i, MMI = %f,'\n # 'expected_test_damage = %f, '\n # 'actual_test_damage = %f' % (i, MMI[i],\n # expected_test_damage,\n # DAM[i]))\n #if not numpy.allclose(expected_test_damage,\n # DAM[i], rtol=1.0e-12):\n # print msg\n\n # Note this test doesn't work, but the question is whether the\n # independent test data is correct.\n # Also small fluctuations in MMI can cause very large changes\n # in computed damage for this example.\n # print mmi, MMI[i], calculated_damage, DAM[i]\n #msg = ('Calculated damage was not as expected for point %i:'\n # 'Got %f, expected %f' % (i, calculated_dam, DAM[i]))\n #assert numpy.allclose(calculated_dam, DAM[i], rtol=0.8), msg\n\n assert min_damage >= 0\n assert max_damage <= 100\n #print 'Extrema', mmi_filename, min_damage, max_damage\n #print len(MMI)\n\n def test_earthquake_impact_OSM_data(self):\n \"\"\"Earthquake layer interpolation to OSM building data works\n\n The impact function used is based on the guidelines plugin\n\n This test also exercises interpolation of hazard level (raster) to\n building locations (vector data).\n \"\"\"\n\n # FIXME: Still needs some reference data to compare to\n for mmi_filename in ['Shakemap_Padang_2009.asc',\n # Time consuming\n #'Earthquake_Ground_Shaking.asc',\n 'Lembang_Earthquake_Scenario.asc']:\n\n # Name file names for hazard level and exposure\n hazard_filename = '%s/%s' % (TESTDATA, mmi_filename)\n exposure_filename = ('%s/OSM_building_polygons_20110905.shp'\n % TESTDATA)\n\n # Calculate impact using API\n H = read_layer(hazard_filename)\n E = read_layer(exposure_filename)\n\n plugin_name = 'Earthquake Guidelines Function'\n plugin_list = get_plugins(plugin_name)\n assert len(plugin_list) == 1\n assert plugin_list[0].keys()[0] == plugin_name\n\n IF = plugin_list[0][plugin_name]\n impact_filename = calculate_impact(layers=[H, E],\n impact_fcn=IF)\n\n # Read input data\n hazard_raster = read_layer(hazard_filename)\n A = hazard_raster.get_data()\n mmi_min, mmi_max = hazard_raster.get_extrema()\n\n exposure_vector = read_layer(exposure_filename)\n coordinates = exposure_vector.get_geometry()\n attributes = exposure_vector.get_data()\n\n # Read calculated result\n impact_vector = read_layer(impact_filename)\n icoordinates = impact_vector.get_geometry()\n iattributes = impact_vector.get_data()\n\n # Verify interpolated MMI with test result\n for i in range(len(iattributes)):\n calculated_mmi = iattributes[i]['MMI']\n\n if 
numpy.isnan(calculated_mmi):\n continue\n\n # Check that interpolated points are within range\n msg = ('Interpolated mmi %f from file %s was outside '\n 'extrema: [%f, %f] at point %i '\n % (calculated_mmi, hazard_filename,\n mmi_min, mmi_max, i))\n assert mmi_min <= calculated_mmi <= mmi_max, msg\n\n calculated_dam = iattributes[i]['DMGLEVEL']\n assert calculated_dam in [1, 2, 3]\n\n def test_tsunami_loss_use_case(self):\n \"\"\"Building loss from tsunami use case works\n \"\"\"\n\n # This test merely exercises the use case as there is\n # no reference data. It does check the sanity of values as\n # far as possible.\n\n hazard_filename = ('%s/tsunami_max_inundation_depth_BB_'\n 'geographic.asc' % TESTDATA)\n exposure_filename = ('%s/tsunami_exposure_BB.shp' % TESTDATA)\n exposure_with_depth_filename = ('%s/tsunami_exposure_BB_'\n 'with_depth.shp' % TESTDATA)\n reference_impact_filename = ('%s/tsunami_impact_assessment_'\n 'BB.shp' % TESTDATA)\n\n # Calculate impact using API\n H = read_layer(hazard_filename)\n E = read_layer(exposure_filename)\n\n plugin_name = 'Tsunami Building Loss Function'\n plugin_list = get_plugins(plugin_name)\n assert len(plugin_list) == 1\n assert plugin_list[0].keys()[0] == plugin_name\n\n IF = plugin_list[0][plugin_name]\n impact_filename = calculate_impact(layers=[H, E],\n impact_fcn=IF)\n\n # Read calculated result\n impact_vector = read_layer(impact_filename)\n icoordinates = impact_vector.get_geometry()\n iattributes = impact_vector.get_data()\n N = len(icoordinates)\n\n # Ensure that calculated point locations coincide with\n # original exposure point locations\n ref_exp = read_layer(exposure_filename)\n refcoordinates = ref_exp.get_geometry()\n\n assert N == len(refcoordinates)\n msg = ('Coordinates of impact results do not match those of '\n 'exposure data')\n assert numpy.allclose(icoordinates, refcoordinates), msg\n\n # Ensure that calculated point locations coincide with\n # original exposure point (with depth) locations\n ref_depth = read_layer(exposure_with_depth_filename)\n refdepth_coordinates = ref_depth.get_geometry()\n refdepth_attributes = ref_depth.get_data()\n assert N == len(refdepth_coordinates)\n msg = ('Coordinates of impact results do not match those of '\n 'exposure data (with depth)')\n assert numpy.allclose(icoordinates, refdepth_coordinates), msg\n\n # Read reference results\n hazard_raster = read_layer(hazard_filename)\n A = hazard_raster.get_data()\n depth_min, depth_max = hazard_raster.get_extrema()\n\n ref_impact = read_layer(reference_impact_filename)\n refimpact_coordinates = ref_impact.get_geometry()\n refimpact_attributes = ref_impact.get_data()\n\n # Check for None\n for i in range(N):\n if refimpact_attributes[i] is None:\n msg = 'Element %i was None' % i\n raise Exception(msg)\n\n # Check sanity of calculated attributes\n for i in range(N):\n lon, lat = icoordinates[i]\n\n depth = iattributes[i]['DEPTH']\n\n # Ignore NaN's\n if numpy.isnan(depth):\n continue\n\n structural_damage = iattributes[i]['STRUCT_DAM']\n contents_damage = iattributes[i]['CONTENTS_D']\n for imp in [structural_damage, contents_damage]:\n msg = ('Percent damage was outside range: %f' % imp)\n assert 0 <= imp <= 1, msg\n\n structural_loss = iattributes[i]['STRUCT_LOS']\n contents_loss = iattributes[i]['CONTENTS_L']\n if depth < 0.3:\n assert structural_loss == 0.0\n assert contents_loss == 0.0\n else:\n assert structural_loss > 0.0\n assert contents_loss > 0.0\n\n number_of_people = iattributes[i]['NEXIS_PEOP']\n people_affected = 
iattributes[i]['PEOPLE_AFF']\n people_severely_affected = iattributes[i]['PEOPLE_SEV']\n\n if 0.01 < depth < 1.0:\n assert people_affected == number_of_people\n else:\n assert people_affected == 0\n\n if depth >= 1.0:\n assert people_severely_affected == number_of_people\n else:\n assert people_severely_affected == 0\n\n # Contents and structural damage is done according\n # to different damage curves and should therefore be different\n if depth > 0 and contents_damage > 0:\n assert contents_damage != structural_damage\n\n def test_tephra_load_impact(self):\n \"\"\"Hypothetical tephra load scenario can be computed\n\n This test also exercises reprojection of UTM data\n \"\"\"\n\n # File names for hazard level and exposure\n\n # FIXME - when we know how to reproject, replace hazard\n # file with UTM version (i.e. without _geographic).\n hazard_filename = os.path.join(TESTDATA,\n 'Ashload_Gede_VEI4_geographic.asc')\n exposure_filename = os.path.join(TESTDATA, 'lembang_schools.shp')\n\n # Calculate impact using API\n H = read_layer(hazard_filename)\n E = read_layer(exposure_filename)\n\n plugin_name = 'Tephra Impact Function'\n plugin_list = get_plugins(plugin_name)\n assert len(plugin_list) == 1\n assert plugin_list[0].keys()[0] == plugin_name\n\n IF = plugin_list[0][plugin_name]\n impact_filename = calculate_impact(layers=[H, E],\n impact_fcn=IF)\n\n # Read input data\n hazard_raster = read_layer(hazard_filename)\n A = hazard_raster.get_data()\n load_min, load_max = hazard_raster.get_extrema()\n\n exposure_vector = read_layer(exposure_filename)\n coordinates = exposure_vector.get_geometry()\n attributes = exposure_vector.get_data()\n\n # Read calculated result\n impact_vector = read_layer(impact_filename)\n coordinates = impact_vector.get_geometry()\n attributes = impact_vector.get_data()\n\n # Test that results are as expected\n # FIXME: Change test when we decide what values should actually be\n # calculated :-) :-) :-)\n for a in attributes:\n load = a['ASHLOAD']\n impact = a['DAMAGE']\n\n # Test interpolation\n msg = 'Load %.15f was outside bounds [%f, %f]' % (load,\n load_min,\n load_max)\n if not numpy.isnan(load):\n assert load_min <= load <= load_max, msg\n\n # Test calcalated values\n #if 0.01 <= load < 90.0:\n # assert impact == 1\n #elif 90.0 <= load < 150.0:\n # assert impact == 2\n #elif 150.0 <= load < 300.0:\n # assert impact == 3\n #elif load >= 300.0:\n # assert impact == 4\n #else:\n # assert impact == 0\n\n if 0.01 <= load < 0.5:\n assert impact == 0\n elif 0.5 <= load < 2.0:\n assert impact == 1\n elif 2.0 <= load < 10.0:\n assert impact == 2\n elif load >= 10.0:\n assert impact == 3\n else:\n assert impact == 0\n\n def test_package_metadata(self):\n \"\"\"Test that riab package loads\n \"\"\"\n\n import impact\n\n impact.VERSION\n impact.__version__\n impact.__author__\n impact.__contact__\n impact.__homepage__\n impact.__docformat__\n assert impact.__license__ == 'GPL'\n\n def test_interpolation_wrapper(self):\n \"\"\"Interpolation library works for linear function\n \"\"\"\n\n # Create test data\n lon_ul = 100 # Longitude of upper left corner\n lat_ul = 10 # Latitude of upper left corner\n numlon = 8 # Number of longitudes\n numlat = 5 # Number of latitudes\n dlon = 1\n dlat = -1\n\n # Define array where latitudes are rows and longitude columns\n A = numpy.zeros((numlat, numlon))\n\n # Establish coordinates for lower left corner\n lat_ll = lat_ul - numlat\n lon_ll = lon_ul\n\n # Define pixel centers along each direction\n longitudes = numpy.linspace(lon_ll + 0.5,\n 
lon_ll + numlon - 0.5, numlon)\n latitudes = numpy.linspace(lat_ll + 0.5,\n lat_ll + numlat - 0.5, numlat)\n\n # Define raster with latitudes going bottom-up (south to north).\n # Longitudes go left-right (west to east)\n for i in range(numlat):\n for j in range(numlon):\n A[numlat - 1 - i, j] = linear_function(longitudes[j],\n latitudes[i])\n\n # Test first that original points are reproduced correctly\n for i, eta in enumerate(latitudes):\n for j, xi in enumerate(longitudes):\n\n val = interpolate_raster(longitudes, latitudes, A,\n [(xi, eta)], mode='linear')[0]\n assert numpy.allclose(val,\n linear_function(xi, eta),\n rtol=1e-12, atol=1e-12)\n\n # Then test that genuinly interpolated points are correct\n xis = numpy.linspace(lon_ll + 1, lon_ll + numlon - 1, 10 * numlon)\n etas = numpy.linspace(lat_ll + 1, lat_ll + numlat - 1, 10 * numlat)\n for xi in xis:\n for eta in etas:\n val = interpolate_raster(longitudes, latitudes, A,\n [(xi, eta)], mode='linear')[0]\n assert numpy.allclose(val,\n linear_function(xi, eta),\n rtol=1e-12, atol=1e-12)\n\n def test_riab_interpolation(self):\n \"\"\"Interpolation using Raster and Vector objects\n \"\"\"\n\n # Create test data\n lon_ul = 100 # Longitude of upper left corner\n lat_ul = 10 # Latitude of upper left corner\n numlon = 8 # Number of longitudes\n numlat = 5 # Number of latitudes\n dlon = 1\n dlat = -1\n\n # Define array where latitudes are rows and longitude columns\n A = numpy.zeros((numlat, numlon))\n\n # Establish coordinates for lower left corner\n lat_ll = lat_ul - numlat\n lon_ll = lon_ul\n\n # Define pixel centers along each direction\n longitudes = numpy.linspace(lon_ll + 0.5,\n lon_ll + numlon - 0.5,\n numlon)\n latitudes = numpy.linspace(lat_ll + 0.5,\n lat_ll + numlat - 0.5,\n numlat)\n\n # Define raster with latitudes going bottom-up (south to north).\n # Longitudes go left-right (west to east)\n for i in range(numlat):\n for j in range(numlon):\n A[numlat - 1 - i, j] = linear_function(longitudes[j],\n latitudes[i])\n\n # Write array to a raster file\n geotransform = (lon_ul, dlon, 0, lat_ul, 0, dlat)\n projection = ('GEOGCS[\"GCS_WGS_1984\",'\n 'DATUM[\"WGS_1984\",'\n 'SPHEROID[\"WGS_1984\",6378137.0,298.257223563]],'\n 'PRIMEM[\"Greenwich\",0.0],'\n 'UNIT[\"Degree\",0.0174532925199433]]')\n\n raster_filename = unique_filename(suffix='.tif')\n write_raster_data(A,\n projection,\n geotransform,\n raster_filename)\n\n # Write test interpolation point to a vector file\n coordinates = []\n for xi in longitudes:\n for eta in latitudes:\n coordinates.append((xi, eta))\n\n vector_filename = unique_filename(suffix='.shp')\n write_vector_data(data=None,\n projection=projection,\n geometry=coordinates,\n filename=vector_filename)\n\n # Read both datasets back in\n R = read_layer(raster_filename)\n V = read_layer(vector_filename)\n\n # Then test that axes and data returned by R are correct\n x, y = R.get_geometry()\n msg = 'X axes was %s, should have been %s' % (longitudes, x)\n assert numpy.allclose(longitudes, x), msg\n msg = 'Y axes was %s, should have been %s' % (latitudes, y)\n assert numpy.allclose(latitudes, y), msg\n AA = R.get_data()\n msg = 'Raster data was %s, should have been %s' % (AA, A)\n assert numpy.allclose(AA, A), msg\n\n # Test riab's interpolation function\n I = R.interpolate(V, name='value')\n Icoordinates = I.get_geometry()\n Iattributes = I.get_data()\n\n assert numpy.allclose(Icoordinates, coordinates)\n\n # Test that interpolated points are correct\n for i, (xi, eta) in enumerate(Icoordinates):\n\n z = 
Iattributes[i]['value']\n #print xi, eta, z, linear_function(xi, eta)\n assert numpy.allclose(z, linear_function(xi, eta),\n rtol=1e-12)\n\n # FIXME (Ole): Need test for values outside grid.\n # They should be NaN or something\n\n # Cleanup\n # FIXME (Ole): Shape files are a collection of files. How to remove?\n os.remove(vector_filename)\n\n def test_interpolation_lembang(self):\n \"\"\"Interpolation using Lembang data set\n \"\"\"\n\n # Name file names for hazard level, exposure and expected fatalities\n hazard_filename = '%s/lembang_mmi_hazmap.asc' % TESTDATA\n exposure_filename = '%s/lembang_schools.shp' % TESTDATA\n\n # Read input data\n hazard_raster = read_layer(hazard_filename)\n A = hazard_raster.get_data()\n mmi_min, mmi_max = hazard_raster.get_extrema()\n\n exposure_vector = read_layer(exposure_filename)\n coordinates = exposure_vector.get_geometry()\n attributes = exposure_vector.get_data()\n\n # Test riab's interpolation function\n I = hazard_raster.interpolate(exposure_vector,\n name='mmi')\n Icoordinates = I.get_geometry()\n Iattributes = I.get_data()\n assert numpy.allclose(Icoordinates, coordinates)\n\n # Check that interpolated MMI was done as expected\n fid = open('%s/lembang_schools_percentage_loss_and_mmi.txt' % TESTDATA)\n reference_points = []\n MMI = []\n DAM = []\n for line in fid.readlines()[1:]:\n fields = line.strip().split(',')\n\n lon = float(fields[4][1:-1])\n lat = float(fields[3][1:-1])\n mmi = float(fields[-1][1:-1])\n\n reference_points.append((lon, lat))\n MMI.append(mmi)\n\n # Verify that coordinates are consistent\n msg = 'Interpolated coordinates do not match those of test data'\n assert numpy.allclose(Icoordinates, reference_points), msg\n\n # Verify interpolated MMI with test result\n for i in range(len(MMI)):\n calculated_mmi = Iattributes[i]['mmi']\n\n # Check that interpolated points are within range\n msg = ('Interpolated mmi %f was outside extrema: '\n '[%f, %f]. ' % (calculated_mmi, mmi_min, mmi_max))\n assert mmi_min <= calculated_mmi <= mmi_max, msg\n\n # Check that result is within 2% - this is good enough\n # as this was calculated using EQRM and thus different.\n assert numpy.allclose(calculated_mmi, MMI[i], rtol=0.02)\n\n def test_interpolation_tsunami(self):\n \"\"\"Interpolation using tsunami data set works\n\n This is test for issue #19 about interpolation overshoot\n \"\"\"\n\n # Name file names for hazard level, exposure and expected fatalities\n hazard_filename = ('%s/tsunami_max_inundation_depth_BB_'\n 'geographic.asc' % TESTDATA)\n exposure_filename = ('%s/tsunami_exposure_BB.shp' % TESTDATA)\n\n # Read input data\n hazard_raster = read_layer(hazard_filename)\n A = hazard_raster.get_data()\n depth_min, depth_max = hazard_raster.get_extrema()\n\n exposure_vector = read_layer(exposure_filename)\n coordinates = exposure_vector.get_geometry()\n attributes = exposure_vector.get_data()\n\n # Test riab's interpolation function\n I = hazard_raster.interpolate(exposure_vector,\n name='depth')\n Icoordinates = I.get_geometry()\n Iattributes = I.get_data()\n assert numpy.allclose(Icoordinates, coordinates)\n\n # Verify interpolated values with test result\n for i in range(len(Icoordinates)):\n\n interpolated_depth = Iattributes[i]['depth']\n # Check that interpolated points are within range\n msg = ('Interpolated depth %f at point %i was outside extrema: '\n '[%f, %f]. 
' % (interpolated_depth, i,\n depth_min, depth_max))\n\n if not numpy.isnan(interpolated_depth):\n assert depth_min <= interpolated_depth <= depth_max, msg\n\n def test_interpolation_tsunami_maumere(self):\n \"\"\"Interpolation using tsunami data set from Maumere\n\n This is a test for interpolation (issue #19)\n \"\"\"\n\n # Name file names for hazard level, exposure and expected fatalities\n hazard_filename = ('%s/maumere_aos_depth_20m_land_wgs84.asc'\n % TESTDATA)\n exposure_filename = ('%s/maumere_pop_prj.shp' % TESTDATA)\n\n # Read input data\n H = read_layer(hazard_filename)\n A = H.get_data()\n depth_min, depth_max = H.get_extrema()\n\n # Compare extrema to values read off QGIS for this layer\n assert numpy.allclose([depth_min, depth_max], [0.0, 16.68],\n rtol=1.0e-6, atol=1.0e-10)\n\n E = read_layer(exposure_filename)\n coordinates = E.get_geometry()\n attributes = E.get_data()\n\n # Test riab's interpolation function\n I = H.interpolate(E, name='depth')\n Icoordinates = I.get_geometry()\n Iattributes = I.get_data()\n assert numpy.allclose(Icoordinates, coordinates)\n\n N = len(Icoordinates)\n assert N == 891\n\n # Verify interpolated values with test result\n for i in range(N):\n\n interpolated_depth = Iattributes[i]['depth']\n pointid = attributes[i]['POINTID']\n\n if pointid == 263:\n\n #print i, pointid, attributes[i],\n #print interpolated_depth, coordinates[i]\n\n # Check that location is correct\n assert numpy.allclose(coordinates[i],\n [122.20367299, -8.61300358])\n\n # This is known to be outside inundation area so should\n # near zero\n assert numpy.allclose(interpolated_depth, 0.0,\n rtol=1.0e-12, atol=1.0e-12)\n\n if pointid == 148:\n # Check that location is correct\n assert numpy.allclose(coordinates[i],\n [122.2045912, -8.608483265])\n\n # This is in an inundated area with a surrounding depths of\n # 4.531, 3.911\n # 2.675, 2.583\n assert interpolated_depth < 4.531\n assert interpolated_depth < 3.911\n assert interpolated_depth > 2.583\n assert interpolated_depth > 2.675\n\n # This is a characterisation test for bilinear interpolation\n assert numpy.allclose(interpolated_depth, 3.62477215491,\n rtol=1.0e-12, atol=1.0e-12)\n\n # Check that interpolated points are within range\n msg = ('Interpolated depth %f at point %i was outside extrema: '\n '[%f, %f]. 
' % (interpolated_depth, i,\n depth_min, depth_max))\n\n if not numpy.isnan(interpolated_depth):\n assert depth_min <= interpolated_depth <= depth_max, msg\n\n\nif __name__ == '__main__':\n suite = unittest.makeSuite(Test_Engine, 'test')\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite)\n" }, { "alpha_fraction": 0.49020978808403015, "alphanum_fraction": 0.5108391642570496, "avg_line_length": 36.467247009277344, "blob_id": "84f20e0ca89387e8c5b7b1a4a590e4f6200f9bdf", "content_id": "57998e7d8a4960edf17fc9b7a5b523eb10f27922", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8580, "license_type": "no_license", "max_line_length": 195, "num_lines": 229, "path": "/impact/plugins/tsunami/tsunami_building_impact.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "from django.template.loader import render_to_string\nfrom impact.plugins.core import FunctionProvider\nfrom impact.storage.vector import Vector\nfrom django.utils.translation import ugettext as _\nfrom impact.plugins.utilities import PointZoomSize\nfrom impact.plugins.utilities import PointClassColor\nfrom impact.plugins.utilities import PointSymbol\nimport scipy.stats\n\n\nclass TsunamiBuildingImpactFunction(FunctionProvider):\n \"\"\"Risk plugin for tsunami impact on building data\n\n :param requires category=='hazard' and \\\n subcategory.startswith('tsunami') and \\\n layer_type=='raster'\n :param requires category=='exposure' and \\\n subcategory.startswith('building') and \\\n layer_type=='vector' and \\\n datatype=='osm'\n \"\"\"\n\n target_field = 'ICLASS'\n\n def run(self, layers):\n \"\"\"Risk plugin for tsunami population\n \"\"\"\n\n # Extract data\n H = layers[0] # Depth\n E = layers[1] # Building locations\n\n #print 'Number of polygons', len(E)\n\n # Interpolate hazard level to building locations\n H = H.interpolate(E)\n\n # Extract relevant numerical data\n coordinates = E.get_geometry()\n depth = H.get_data()\n N = len(depth)\n\n # List attributes to carry forward to result layer\n attributes = E.get_attribute_names()\n\n # Calculate building impact according to guidelines\n count3 = 0\n count1 = 0\n count0 = 0\n population_impact = []\n for i in range(N):\n\n # Get depth\n dep = float(depth[i].values()[0])\n\n # Classify buildings according to depth\n if dep >= 3:\n affected = 3 # FIXME: Colour upper bound is 100 but\n count3 += 1 # does not catch affected == 100\n elif 1 <= dep < 3:\n affected = 2\n count1 += 1\n else:\n affected = 1\n count0 += 1\n\n # Collect depth and calculated damage\n result_dict = {self.target_field: affected,\n 'DEPTH': dep}\n\n # Carry all original attributes forward\n for key in attributes:\n result_dict[key] = E.get_data(key, i)\n\n # Record result for this feature\n population_impact.append(result_dict)\n\n # Create report\n caption = ('<table border=\"0\" width=\"320px\">'\n ' <tr><th><b>%s</b></th><th><b>%s</b></th></th>'\n ' <tr></tr>'\n ' <tr><td>%s&#58;</td><td>%i</td></tr>'\n ' <tr><td>%s&#58;</td><td>%i</td></tr>'\n ' <tr><td>%s&#58;</td><td>%i</td></tr>'\n '</table>' % ('ketinggian tsunami', 'Jumlah gedung',\n '< 1 m', count0,\n '1 - 3 m', count1,\n '> 3 m', count3))\n\n # Create vector layer and return\n V = Vector(data=population_impact,\n projection=E.get_projection(),\n geometry=coordinates,\n name='Estimate of buildings affected',\n keywords={'caption': caption})\n return V\n\n def generate_style(self, data):\n \"\"\"Generates a polygon SLD file based on the data values\n \"\"\"\n\n # FIXME 
(Ole): Return static style to start with: ticket #144\n style = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<sld:StyledLayerDescriptor xmlns=\"http://www.opengis.net/sld\" xmlns:sld=\"http://www.opengis.net/sld\" xmlns:ogc=\"http://www.opengis.net/ogc\" xmlns:gml=\"http://www.opengis.net/gml\" version=\"1.0.0\">\n <sld:NamedLayer>\n <sld:Name>earthquake_impact</sld:Name>\n <sld:UserStyle>\n <sld:Name>earthquake_impact</sld:Name>\n <sld:Title/>\n <sld:FeatureTypeStyle>\n <sld:Name>name</sld:Name>\n <sld:Rule>\n <sld:Name>1</sld:Name>\n <sld:Title>Low</sld:Title>\n <ogc:Filter>\n <ogc:PropertyIsLessThan>\n <ogc:PropertyName>ICLASS</ogc:PropertyName>\n <ogc:Literal>1.5</ogc:Literal>\n </ogc:PropertyIsLessThan>\n </ogc:Filter>\n <sld:PolygonSymbolizer>\n <sld:Fill>\n <sld:CssParameter name=\"fill\">#1EFC7C</sld:CssParameter>\n </sld:Fill>\n <sld:Stroke>\n <sld:CssParameter name=\"stroke\">#0EEC6C</sld:CssParameter>\n </sld:Stroke>\n </sld:PolygonSymbolizer>\n </sld:Rule>\n <sld:Rule>\n <sld:Name>2</sld:Name>\n <sld:Title>Medium</sld:Title>\n <ogc:Filter>\n <ogc:And>\n <ogc:PropertyIsGreaterThanOrEqualTo>\n <ogc:PropertyName>ICLASS</ogc:PropertyName>\n <ogc:Literal>1.5</ogc:Literal>\n </ogc:PropertyIsGreaterThanOrEqualTo>\n <ogc:PropertyIsLessThan>\n <ogc:PropertyName>ICLASS</ogc:PropertyName>\n <ogc:Literal>2.5</ogc:Literal>\n </ogc:PropertyIsLessThan>\n </ogc:And>\n </ogc:Filter>\n <sld:PolygonSymbolizer>\n <sld:Fill>\n <sld:CssParameter name=\"fill\">#FD8D3C</sld:CssParameter>\n </sld:Fill>\n <sld:Stroke>\n <sld:CssParameter name=\"stroke\">#ED7D2C</sld:CssParameter>\n </sld:Stroke>\n </sld:PolygonSymbolizer>\n </sld:Rule>\n <sld:Rule>\n <sld:Name>3</sld:Name>\n <sld:Title>High</sld:Title>\n <ogc:Filter>\n <ogc:PropertyIsGreaterThanOrEqualTo>\n <ogc:PropertyName>ICLASS</ogc:PropertyName>\n <ogc:Literal>2.5</ogc:Literal>\n </ogc:PropertyIsGreaterThanOrEqualTo>\n </ogc:Filter>\n <sld:PolygonSymbolizer>\n <sld:Fill>\n <sld:CssParameter name=\"fill\">#F31A1C</sld:CssParameter>\n </sld:Fill>\n <sld:Stroke>\n <sld:CssParameter name=\"stroke\">#E30A0C</sld:CssParameter>\n </sld:Stroke>\n </sld:PolygonSymbolizer>\n </sld:Rule>\n </sld:FeatureTypeStyle>\n </sld:UserStyle>\n </sld:NamedLayer>\n</sld:StyledLayerDescriptor>\n\"\"\"\n\n return style\n\n def Xgenerate_style(self, data):\n \"\"\"Generates and SLD file based on the data values\n \"\"\"\n #DEFAULT_SYMBOL = 'ttf://Webdings#0x0067'\n DEFAULT_SYMBOL = 'circle'\n\n symbol_field = None\n symbol_keys = [None, '']\n symbol_values = [DEFAULT_SYMBOL, DEFAULT_SYMBOL]\n\n # Zoom levels (large number means close up)\n scale_keys = [50000000000, 10000000000, 10000000, 5000000,\n 1000000, 500000, 250000, 100000]\n scale_values = [2, 4, 6, 8, 1, 1, 1, 1]\n\n # Predefined colour classes\n class_keys = ['< 1 m', '1 - 3 m', '> 3 m']\n class_values = [{'min': 0.5, 'max': 1.5,\n 'color': '#cccccc', 'opacity': '1'},\n {'min': 1.5, 'max': 2.5,\n 'color': '#fd8d3c', 'opacity': '1'},\n {'min': 2.5, 'max': 3.5,\n 'color': '#e31a1c', 'opacity': '1'}]\n\n if self.symbol_field in data.get_attribute_names():\n symbol_field = self.symbol_field\n\n symbol_keys.extend(['Church/Mosque', 'Commercial (office)',\n 'Hotel',\n 'Medical facility', 'Other',\n 'Other industrial',\n 'Residential', 'Retail', 'School',\n 'Unknown', 'Warehouse'])\n\n symbol_values.extend([DEFAULT_SYMBOL, DEFAULT_SYMBOL,\n DEFAULT_SYMBOL,\n DEFAULT_SYMBOL, DEFAULT_SYMBOL,\n DEFAULT_SYMBOL,\n DEFAULT_SYMBOL, DEFAULT_SYMBOL,\n DEFAULT_SYMBOL,\n DEFAULT_SYMBOL, DEFAULT_SYMBOL])\n\n params = 
dict(name=data.get_name(),\n damage_field=self.target_field,\n symbol_field=symbol_field,\n symbols=dict(zip(symbol_keys, symbol_values)),\n scales=dict(zip(scale_keys, scale_values)),\n classifications=dict(zip(class_keys, class_values)))\n\n return render_to_string('impact/styles/point_classes.sld', params)\n" }, { "alpha_fraction": 0.7360115051269531, "alphanum_fraction": 0.7474892139434814, "avg_line_length": 18.91428565979004, "blob_id": "bed35690e10f76529de5683c07edd934c5af1133", "content_id": "95cd6b103eb2e209e78b5a466287c18fced4305e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 697, "license_type": "no_license", "max_line_length": 76, "num_lines": 35, "path": "/calculator/README.md", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "# Risiko Calculator\n\n## Preparation\n\nInitialize the build environment.\n\n ant init\n\nYou only need to run `ant init` once (or any time dependencies change).\n\n## Debug Mode\n\nLoads all scripts uncompressed.\n\n ant debug\n\nThis will give you an application available at http://localhost:8080/ by\ndefault.\n\nTo use a GeoServer instance other than\nhttp://localhost:8001/geoserver-geonode-dev, add the following option to the\n`ant debug` command:\n\n -Dapp.proxy.geoserver=<geoserver_url>\n\nwhere `<geoserver_url>` is e.g.\nhttp://my.risiko.box/geoserver-geonode-dev/\n\n## Prepare App for Deployment\n\nTo create a servlet run the following:\n\n ant\n\nThe servlet will be assembled in the build directory.\n" }, { "alpha_fraction": 0.5373571515083313, "alphanum_fraction": 0.5426310896873474, "avg_line_length": 30.601852416992188, "blob_id": "1a516cd12a04e32e0815ffea4d5bc5312801c8d3", "content_id": "629cfe816d780ab00a56e0163197095da81c0295", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3413, "license_type": "no_license", "max_line_length": 79, "num_lines": 108, "path": "/impact/storage/projection.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "\"\"\"Class projection\n\"\"\"\n\nfrom osgeo import osr\n\n# The projection string depends on the gdal version\nDEFAULT_PROJECTION = '+proj=longlat +datum=WGS84 +no_defs'\n\n\nclass Projection:\n \"\"\"Represents projections associated with layers\n \"\"\"\n\n def __init__(self, p):\n \"\"\"Constructor for Projection.\n\n input:\n p: Projection information.\n Any of the GDAL formats are OK including WKT, proj4, ESRI, XML\n It can also be an instance of Projection.\n \"\"\"\n\n if p is None:\n msg = 'Requested projection is None'\n raise TypeError(msg)\n\n # Clean input string. 
This will also work when p is of class\n # Projection by virtue of its __repr__()\n p = str(p).strip()\n\n # Create OSR spatial reference object\n srs = self.spatial_reference = osr.SpatialReference()\n\n # Try importing\n input_OK = False\n for import_func in [srs.ImportFromProj4,\n srs.ImportFromWkt,\n srs.ImportFromEPSG,\n srs.ImportFromESRI,\n srs.ImportFromMICoordSys,\n srs.ImportFromPCI,\n srs.ImportFromXML,\n srs.ImportFromUSGS,\n srs.ImportFromUrl]:\n\n res = import_func(p)\n if res == 0:\n input_OK = True\n break\n\n if not input_OK:\n msg = 'Spatial reference %s was not recognised' % p\n raise TypeError(msg)\n\n # Store some - FIXME this is only for backwards compat, remove.\n self.wkt = self.get_projection(proj4=False)\n self.proj4 = self.get_projection(proj4=True)\n\n def __repr__(self):\n return self.wkt\n\n def get_projection(self, proj4=False):\n \"\"\"Return projection\n\n Input\n proj4: If True, projection will be returned in format suitable\n for comparison.\n If False (default) projection will be returned in WKT format\n\n # FIXME: Maybe add all formats somehow\n \"\"\"\n\n if proj4:\n p = self.spatial_reference.ExportToProj4()\n else:\n p = self.spatial_reference.ExportToWkt()\n\n return p.strip()\n\n def __eq__(self, other):\n \"\"\"Override '==' to allow comparison with other projection objecs\n \"\"\"\n\n try:\n other = Projection(other)\n except Exception, e:\n msg = ('Argument to == must be a spatial reference or object'\n ' of class Projection. I got %s with error '\n 'message: %s' % (str(other), e))\n raise TypeError(msg)\n\n if self.spatial_reference.IsSame(other.spatial_reference):\n # Native comparison checks out\n return True\n else:\n # We have seen cases where the native comparison didn't work\n # for projections that should be identical. See e.g.\n # https://github.com/AIFDR/riab/issues/160\n # Hence do a secondary check using the proj4 string\n\n return (self.get_projection(proj4=True) ==\n other.get_projection(proj4=True))\n\n def __ne__(self, other):\n \"\"\"Override '!=' to allow comparison with other projection objecs\n \"\"\"\n\n return not self == other\n" }, { "alpha_fraction": 0.7635542154312134, "alphanum_fraction": 0.7635542154312134, "avg_line_length": 65.30000305175781, "blob_id": "54036c38cff04aef00f1a0eee0521e1244edc5f5", "content_id": "119711f3c11b285b633a0c5edc46724fed2ae4d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 664, "license_type": "no_license", "max_line_length": 294, "num_lines": 10, "path": "/docs/usage/plugins/examples_intro.rst", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "===============\nExample Plugins\n===============\n\nIntroduction\n------------\n\nThe following section list a number of example plugins for earthquake, tsunami and flood. They demonstrate a number of the different ways that plugins might be developed including a demonstration of styling, accessing layer metadata and conditional impact calculation based on the metadata.\n\n.. warning:: This plugin code in these examples is provided for example and tutorial purposes only. They are not guarenteed to produce the valid impact results under all situations and may rely on a number of (unstated) assumptions about the underlying data. 
In other words you milage may vary.\n\n" }, { "alpha_fraction": 0.472809374332428, "alphanum_fraction": 0.5152594447135925, "avg_line_length": 35.232723236083984, "blob_id": "891cfb0d560130f5c7dd2905297e79aafa6e0e05", "content_id": "bd1d8190bcaa1dbfd7a93eb2a6ae5016edbcbb27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 51378, "license_type": "no_license", "max_line_length": 79, "num_lines": 1418, "path": "/impact/tests/test_io.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "import unittest\nimport numpy\nimport os\nimport impact\n\nfrom osgeo import gdal\n\nfrom impact.storage.raster import Raster\nfrom impact.storage.vector import Vector\nfrom impact.storage.vector import convert_polygons_to_centroids\nfrom impact.storage.projection import Projection\nfrom impact.storage.projection import DEFAULT_PROJECTION\nfrom impact.storage.io import read_layer\nfrom impact.storage.io import write_vector_data\nfrom impact.storage.io import write_raster_data\nfrom impact.storage.utilities import unique_filename\nfrom impact.storage.utilities import write_keywords\nfrom impact.storage.utilities import read_keywords\nfrom impact.storage.utilities import bbox_intersection\nfrom impact.storage.utilities import minimal_bounding_box\nfrom impact.storage.utilities import buffered_bounding_box\nfrom impact.storage.utilities import array2wkt\nfrom impact.storage.utilities import calculate_polygon_area\nfrom impact.storage.utilities import calculate_polygon_centroid\nfrom impact.storage.utilities import geotransform2bbox\nfrom impact.storage.utilities import geotransform2resolution\nfrom impact.storage.utilities import nanallclose\nfrom impact.storage.io import get_bounding_box\nfrom impact.storage.io import bboxlist2string, bboxstring2list\nfrom impact.tests.utilities import same_API\nfrom impact.tests.utilities import TESTDATA\nfrom impact.tests.utilities import FEATURE_COUNTS\nfrom impact.tests.utilities import GEOTRANSFORMS\n\n\n# Auxiliary function for raster test\ndef linear_function(x, y):\n \"\"\"Auxiliary function for use with raster test\n \"\"\"\n\n return x + y / 2.\n\n\nclass Test_IO(unittest.TestCase):\n \"\"\"Tests for reading and writing of raster and vector data\n \"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def test_instantiation_of_empty_layers(self):\n \"\"\"Vector and Raster objects can be instantiated with None\n \"\"\"\n\n v = Vector(None)\n assert v.get_name().startswith('Vector')\n\n r = Raster(None)\n assert r.get_name().startswith('Raster')\n\n def test_vector_feature_count(self):\n \"\"\"Number of features read from vector data is as expected\n \"\"\"\n\n # Read and verify test data\n for vectorname in ['lembang_schools.shp',\n 'tsunami_exposure_BB.shp',\n 'Padang_WGS84.shp',\n 'OSM_building_polygons_20110905.shp',\n 'OSM_subset.shp']:\n\n filename = '%s/%s' % (TESTDATA, vectorname)\n layer = read_layer(filename)\n coords = layer.get_geometry()\n attributes = layer.get_data()\n\n # Check basic data integrity\n N = len(layer)\n assert len(coords) == N\n assert len(attributes) == N\n assert FEATURE_COUNTS[vectorname] == N\n\n def test_reading_and_writing_of_vector_point_data(self):\n \"\"\"Vector point data can be read and written correctly\n \"\"\"\n\n # First test that some error conditions are caught\n filename = unique_filename(suffix='nshoe66u')\n try:\n read_layer(filename)\n except Exception:\n pass\n else:\n msg = 'Exception for unknown extension should have been raised'\n 
raise Exception(msg)\n\n filename = unique_filename(suffix='.gml')\n try:\n read_layer(filename)\n except IOError:\n pass\n else:\n msg = 'Exception for non-existing file should have been raised'\n raise Exception(msg)\n\n # Read and verify test data\n for vectorname in ['lembang_schools.shp',\n 'tsunami_exposure_BB.shp',\n 'Padang_WGS84.shp']:\n\n filename = '%s/%s' % (TESTDATA, vectorname)\n layer = read_layer(filename)\n coords = numpy.array(layer.get_geometry())\n attributes = layer.get_data()\n\n # Check basic data integrity\n N = len(layer)\n assert coords.shape[0] == N\n assert coords.shape[1] == 2\n\n assert FEATURE_COUNTS[vectorname] == N\n\n assert isinstance(layer.get_name(), basestring)\n\n # Check projection\n wkt = layer.get_projection(proj4=False)\n assert wkt.startswith('GEOGCS')\n\n assert layer.projection == Projection(DEFAULT_PROJECTION)\n\n # Check integrity of each feature\n field_names = None\n for i in range(N):\n # Consistency between of geometry and fields\n\n x1 = coords[i, 0]\n x2 = attributes[i]['LONGITUDE']\n assert x2 is not None\n msg = 'Inconsistent longitudes: %f != %f' % (x1, x2)\n assert numpy.allclose(x1, x2), msg\n\n x1 = coords[i, 1]\n x2 = attributes[i]['LATITUDE']\n assert x2 is not None\n msg = 'Inconsistent longitudes: %f != %f' % (x1, x2)\n assert numpy.allclose(x1, x2), msg\n\n # Verify that each feature has the same fields\n if field_names is None:\n field_names = attributes[i].keys()\n else:\n assert len(field_names) == len(attributes[i].keys())\n assert field_names == attributes[i].keys()\n\n # Write data back to file\n # FIXME (Ole): I would like to use gml here, but OGR does not\n # store the spatial reference! Ticket #18\n out_filename = unique_filename(suffix='.shp')\n write_vector_data(attributes, wkt, coords, out_filename)\n\n # Read again and check\n layer = read_layer(out_filename)\n coords = numpy.array(layer.get_geometry())\n attributes = layer.get_data()\n\n # Check basic data integrity\n N = len(layer)\n assert coords.shape[0] == N\n assert coords.shape[1] == 2\n\n # Check projection\n assert layer.projection == Projection(DEFAULT_PROJECTION)\n\n # Check integrity of each feature\n field_names = None\n for i in range(N):\n\n # Consistency between of geometry and fields\n x1 = coords[i, 0]\n x2 = attributes[i]['LONGITUDE']\n assert x2 is not None\n msg = 'Inconsistent longitudes: %f != %f' % (x1, x2)\n assert numpy.allclose(x1, x2), msg\n\n x1 = coords[i, 1]\n x2 = attributes[i]['LATITUDE']\n assert x2 is not None\n msg = 'Inconsistent longitudes: %f != %f' % (x1, x2)\n assert numpy.allclose(x1, x2), msg\n\n # Verify that each feature has the same fields\n if field_names is None:\n field_names = attributes[i].keys()\n else:\n assert len(field_names) == len(attributes[i].keys())\n assert field_names == attributes[i].keys()\n\n # Test individual extraction\n lon = layer.get_data(attribute='LONGITUDE')\n assert numpy.allclose(lon, coords[:, 0])\n\n def test_analysis_of_vector_data_top_N(self):\n \"\"\"Analysis of vector data - get top N of an attribute\n \"\"\"\n\n for vectorname in ['lembang_schools.shp',\n 'tsunami_exposure_BB.shp']:\n\n filename = '%s/%s' % (TESTDATA, vectorname)\n layer = read_layer(filename)\n coords = layer.get_geometry()\n attributes = layer.get_data()\n\n # Check exceptions\n try:\n L = layer.get_topN(attribute='FLOOR_AREA', N=0)\n except AssertionError:\n pass\n else:\n msg = 'Exception should have been raised for N == 0'\n raise Exception(msg)\n\n # Check results\n for N in [5, 10, 11, 17]:\n if vectorname == 
'lembang_schools.shp':\n L = layer.get_topN(attribute='FLOOR_AREA', N=N)\n assert len(L) == N\n assert L.get_projection() == layer.get_projection()\n #print [a['FLOOR_AREA'] for a in L.attributes]\n elif vectorname == 'tsunami_exposure_BB.shp':\n L = layer.get_topN(attribute='STR_VALUE', N=N)\n assert len(L) == N\n assert L.get_projection() == layer.get_projection()\n val = [a['STR_VALUE'] for a in L.data]\n\n ref = [a['STR_VALUE'] for a in attributes]\n ref.sort()\n\n assert numpy.allclose(val, ref[-N:],\n atol=1.0e-12, rtol=1.0e-12)\n else:\n raise Exception\n\n def test_vector_class(self):\n \"\"\"Consistency of vector class for point data\n \"\"\"\n\n # Read data file\n layername = 'lembang_schools.shp'\n filename = '%s/%s' % (TESTDATA, layername)\n V = read_layer(filename)\n\n # Make a smaller dataset\n V_ref = V.get_topN('FLOOR_AREA', 5)\n\n geometry = V_ref.get_geometry()\n data = V_ref.get_data()\n projection = V_ref.get_projection()\n\n # Create new object from test data\n V_new = Vector(data=data, projection=projection, geometry=geometry)\n\n # Check\n assert V_new == V_ref\n assert not V_new != V_ref\n\n # Write this new object, read it again and check\n tmp_filename = unique_filename(suffix='.shp')\n V_new.write_to_file(tmp_filename)\n\n V_tmp = read_layer(tmp_filename)\n assert V_tmp == V_ref\n assert not V_tmp != V_ref\n\n # Check that equality raises exception when type is wrong\n try:\n V_tmp == Raster()\n except TypeError:\n pass\n else:\n msg = 'Should have raised TypeError'\n raise Exception(msg)\n\n def test_reading_and_writing_of_vector_polygon_data(self):\n \"\"\"Vector polygon data can be read and written correctly\n \"\"\"\n\n # Read and verify test data\n vectorname = 'kecamatan_geo.shp'\n\n filename = '%s/%s' % (TESTDATA, vectorname)\n layer = read_layer(filename)\n geometry = layer.get_geometry()\n attributes = layer.get_data()\n\n # Check basic data integrity\n N = len(layer)\n\n assert len(geometry) == N\n assert len(attributes) == N\n assert len(attributes[0]) == 8\n\n assert FEATURE_COUNTS[vectorname] == N\n assert isinstance(layer.get_name(), basestring)\n\n # Check projection\n wkt = layer.get_projection(proj4=False)\n assert wkt.startswith('GEOGCS')\n\n assert layer.projection == Projection(DEFAULT_PROJECTION)\n\n # Check each polygon\n for i in range(N):\n geom = geometry[i]\n n = geom.shape[0]\n assert n > 2\n assert geom.shape[1] == 2\n\n # Check that polygon is closed\n assert numpy.allclose(geom[0], geom[-1], rtol=0)\n\n # But that not all points are the same\n max_dist = 0\n for j in range(n):\n d = numpy.sum((geom[j] - geom[0]) ** 2) / n\n if d > max_dist:\n max_dist = d\n assert max_dist > 0\n\n # Check integrity of each feature\n expected_features = {13: {'AREA': 28760732,\n 'POP_2007': 255383,\n 'KECAMATAN': 'kali deres',\n 'KEPADATAN': 60,\n 'PROPINSI': 'DKI JAKARTA'},\n 21: {'AREA': 13155073,\n 'POP_2007': 247747,\n 'KECAMATAN': 'kramat jati',\n 'KEPADATAN': 150,\n 'PROPINSI': 'DKI JAKARTA'},\n 35: {'AREA': 4346540,\n 'POP_2007': 108274,\n 'KECAMATAN': 'senen',\n 'KEPADATAN': 246,\n 'PROPINSI': 'DKI JAKARTA'}}\n\n field_names = None\n for i in range(N):\n # Consistency with attributes read manually with qgis\n\n if i in expected_features:\n att = attributes[i]\n exp = expected_features[i]\n\n for key in exp:\n msg = ('Expected attribute %s was not found in feature %i'\n % (key, i))\n assert key in att, msg\n\n a = att[key]\n e = exp[key]\n msg = 'Got %s: \"%s\" but expected \"%s\"' % (key, a, e)\n assert a == e, msg\n\n # Write data back to 
file\n # FIXME (Ole): I would like to use gml here, but OGR does not\n # store the spatial reference! Ticket #18\n out_filename = unique_filename(suffix='.shp')\n write_vector_data(attributes, wkt, geometry, out_filename)\n\n # Read again and check\n layer = read_layer(out_filename)\n geometry_new = layer.get_geometry()\n attributes_new = layer.get_data()\n\n N = len(layer)\n assert len(geometry_new) == N\n assert len(attributes_new) == N\n\n for i in range(N):\n assert numpy.allclose(geometry[i],\n geometry_new[i],\n rtol=1.0e-6) # OGR works in single precision\n\n assert len(attributes_new[i]) == 8\n for key in attributes_new[i]:\n assert attributes_new[i][key] == attributes[i][key]\n\n def test_centroids_from_polygon_data(self):\n \"\"\"Centroid point data can be derived from polygon data\n\n Test againts centroid data generated by qgis: named *_centroids.shp\n \"\"\"\n\n for vectorname in ['kecamatan_geo.shp',\n 'OSM_subset.shp']:\n\n # Read and verify test data\n filename = '%s/%s' % (TESTDATA, vectorname)\n p_layer = read_layer(filename)\n p_geometry = p_layer.get_geometry()\n p_attributes = p_layer.get_data()\n N = len(p_layer)\n assert FEATURE_COUNTS[vectorname] == N\n\n # Read reference centroids generated by Qgis\n filename = '%s/%s' % (TESTDATA, vectorname[:-4] + '_centroids.shp')\n if os.path.isfile(filename):\n r_layer = read_layer(filename)\n r_geometry = r_layer.get_geometry()\n r_attributes = r_layer.get_data()\n assert len(r_layer) == N\n\n # Compute centroid data\n c_layer = convert_polygons_to_centroids(p_layer)\n assert len(c_layer) == N\n c_geometry = c_layer.get_geometry()\n c_attributes = c_layer.get_data()\n\n # Check that attributes are the same\n for i in range(N):\n p_att = p_attributes[i]\n c_att = c_attributes[i]\n r_att = r_attributes[i]\n for key in p_att:\n assert key in c_att\n assert c_att[key] == p_att[key]\n\n assert key in r_att\n assert c_att[key] == r_att[key]\n\n # Check that coordinates are the same up to machine precision\n for i in range(N):\n c_geom = c_geometry[i]\n r_geom = r_geometry[i]\n\n assert numpy.allclose(c_geom, r_geom,\n rtol=0.0, atol=1.0e-9)\n\n # Write to file (for e.g. 
visual inspection)\n out_filename = unique_filename(prefix='centroid', suffix='.shp')\n #print 'writing to', out_filename\n c_layer.write_to_file(out_filename)\n\n def test_rasters_and_arrays(self):\n \"\"\"Consistency of rasters and associated arrays\n \"\"\"\n\n # Create test data\n lon_ul = 100 # Longitude of upper left corner\n lat_ul = 10 # Latitude of upper left corner\n numlon = 8 # Number of longitudes\n numlat = 5 # Number of latitudes\n dlon = 1\n dlat = -1\n\n # Define array where latitudes are rows and longitude columns\n A1 = numpy.zeros((numlat, numlon))\n\n # Establish coordinates for lower left corner\n lat_ll = lat_ul - numlat\n lon_ll = lon_ul\n\n # Define pixel centers along each direction\n lon = numpy.linspace(lon_ll + 0.5, lon_ll + numlon - 0.5, numlon)\n lat = numpy.linspace(lat_ll + 0.5, lat_ll + numlat - 0.5, numlat)\n\n # Define raster with latitudes going bottom-up (south to north).\n # Longitudes go left-right (west to east)\n for i in range(numlat):\n for j in range(numlon):\n A1[numlat - 1 - i, j] = linear_function(lon[j], lat[i])\n\n # Upper left corner\n assert A1[0, 0] == 105.25\n assert A1[0, 0] == linear_function(lon[0], lat[4])\n\n # Lower left corner\n assert A1[4, 0] == 103.25\n assert A1[4, 0] == linear_function(lon[0], lat[0])\n\n # Upper right corner\n assert A1[0, 7] == 112.25\n assert A1[0, 7] == linear_function(lon[7], lat[4])\n\n # Lower right corner\n assert A1[4, 7] == 110.25\n assert A1[4, 7] == linear_function(lon[7], lat[0])\n\n # Generate raster object and write\n projection = ('GEOGCS[\"WGS 84\",'\n 'DATUM[\"WGS_1984\",'\n 'SPHEROID[\"WGS 84\",6378137,298.2572235630016,'\n 'AUTHORITY[\"EPSG\",\"7030\"]],'\n 'AUTHORITY[\"EPSG\",\"6326\"]],'\n 'PRIMEM[\"Greenwich\",0],'\n 'UNIT[\"degree\",0.0174532925199433],'\n 'AUTHORITY[\"EPSG\",\"4326\"]]')\n geotransform = (lon_ul, dlon, 0, lat_ul, 0, dlat)\n R1 = Raster(A1, projection, geotransform,\n keywords={'testdata': None, 'size': 'small'})\n\n msg = ('Dimensions of raster array do not match those of '\n 'raster object')\n assert numlat == R1.rows, msg\n assert numlon == R1.columns, msg\n\n # Write back to new (tif) file\n out_filename = unique_filename(suffix='.tif')\n R1.write_to_file(out_filename)\n\n # Read again and check consistency\n R2 = read_layer(out_filename)\n\n msg = ('Dimensions of written raster array do not match those '\n 'of input raster file\\n')\n msg += (' Dimensions of input file '\n '%s: (%s, %s)\\n' % (R1.filename, numlat, numlon))\n msg += (' Dimensions of output file %s: '\n '(%s, %s)' % (R2.filename, R2.rows, R2.columns))\n\n assert numlat == R2.rows, msg\n assert numlon == R2.columns, msg\n\n A2 = R2.get_data()\n\n assert numpy.allclose(numpy.min(A1), numpy.min(A2))\n assert numpy.allclose(numpy.max(A1), numpy.max(A2))\n\n msg = 'Array values of written raster array were not as expected'\n assert numpy.allclose(A1, A2), msg\n\n msg = 'Geotransforms were different'\n assert R1.get_geotransform() == R2.get_geotransform(), msg\n\n p1 = R1.get_projection(proj4=True)\n p2 = R2.get_projection(proj4=True)\n msg = 'Projections were different: %s != %s' % (p1, p2)\n assert p1 == p1, msg\n\n # Exercise projection __eq__ method\n assert R1.projection == R2.projection\n\n # Check that equality raises exception when type is wrong\n try:\n R1.projection == 234\n except TypeError:\n pass\n else:\n msg = 'Should have raised TypeError'\n raise Exception(msg)\n\n # Check keywords\n assert R1.keywords == R2.keywords\n\n # Check override of ==\n assert R1 == R2\n\n def 
test_reading_and_writing_of_real_rasters(self):\n \"\"\"Rasters can be read and written correctly in different formats\n \"\"\"\n\n for rastername in ['Earthquake_Ground_Shaking_clip.tif',\n 'Population_2010_clip.tif',\n 'shakemap_padang_20090930.asc',\n 'population_padang_1.asc',\n 'population_padang_2.asc']:\n\n filename = '%s/%s' % (TESTDATA, rastername)\n R1 = read_layer(filename)\n\n # Check consistency of raster\n A1 = R1.get_data()\n M, N = A1.shape\n\n msg = ('Dimensions of raster array do not match those of '\n 'raster file %s' % R1.filename)\n assert M == R1.rows, msg\n assert N == R1.columns, msg\n\n # Write back to new file\n for ext in ['.tif']: # Would like to also have , '.asc']:\n out_filename = unique_filename(suffix=ext)\n write_raster_data(A1,\n R1.get_projection(),\n R1.get_geotransform(),\n out_filename,\n keywords=R1.keywords)\n\n # Read again and check consistency\n R2 = read_layer(out_filename)\n\n msg = ('Dimensions of written raster array do not match those '\n 'of input raster file\\n')\n msg += (' Dimensions of input file '\n '%s: (%s, %s)\\n' % (R1.filename, M, N))\n msg += (' Dimensions of output file %s: '\n '(%s, %s)' % (R2.filename, R2.rows, R2.columns))\n\n assert M == R2.rows, msg\n assert N == R2.columns, msg\n\n A2 = R2.get_data()\n\n assert numpy.allclose(numpy.nanmin(A1), numpy.nanmin(A2))\n assert numpy.allclose(numpy.nanmax(A1), numpy.nanmax(A2))\n\n msg = ('Array values of written raster array were not as '\n 'expected')\n assert nanallclose(A1, A2), msg\n\n msg = 'Geotransforms were different'\n assert R1.get_geotransform() == R2.get_geotransform(), msg\n\n p1 = R1.get_projection(proj4=True)\n p2 = R2.get_projection(proj4=True)\n msg = 'Projections were different: %s != %s' % (p1, p2)\n assert p1 == p1, msg\n\n msg = 'Keywords were different: %s != %s' % (R1.keywords,\n R2.keywords)\n assert R1.keywords == R2.keywords, msg\n\n # Use overridden == and != to verify\n assert R1 == R2\n assert not R1 != R2\n\n # Check that equality raises exception when type is wrong\n try:\n R1 == Vector()\n except TypeError:\n pass\n else:\n msg = 'Should have raised TypeError'\n raise Exception(msg)\n\n def test_no_projection(self):\n \"\"\"Raster layers with no projection causes Exception to be raised\n \"\"\"\n\n rastername = 'grid_without_projection.asc'\n filename = '%s/%s' % (TESTDATA, rastername)\n try:\n read_layer(filename)\n except RuntimeError:\n pass\n else:\n msg = 'Should have raised RuntimeError'\n raise Exception(msg)\n\n def test_nodata_value(self):\n \"\"\"NODATA value is correctly recorded in GDAL\n \"\"\"\n\n # Read files with -9999 as nominated nodata value\n for rastername in ['Population_2010_clip.tif',\n 'Lembang_Earthquake_Scenario.asc',\n 'Earthquake_Ground_Shaking.asc']:\n\n filename = '%s/%s' % (TESTDATA, rastername)\n R = read_layer(filename)\n\n A = R.get_data(nan=False)\n\n # Verify nodata value\n Amin = min(A.flat[:])\n msg = ('Raster must have -9999 as its minimum for this test. 
'\n 'We got %f for file %s' % (Amin, filename))\n assert Amin == -9999, msg\n\n # Verify that GDAL knows about this\n nodata = R.get_nodata_value()\n msg = ('File %s should have registered nodata '\n 'value %i but it was %s' % (filename, Amin, nodata))\n assert nodata == Amin, msg\n\n def test_vector_extrema(self):\n \"\"\"Vector extremum calculation works\n \"\"\"\n\n for layername in ['lembang_schools.shp',\n 'tsunami_exposure_BB.shp']:\n\n filename = '%s/%s' % (TESTDATA, layername)\n L = read_layer(filename)\n\n if layername == 'tsunami_exposure_BB.shp':\n attributes = L.get_data()\n\n for name in ['STR_VALUE', 'CONT_VALUE']:\n minimum, maximum = L.get_extrema(name)\n assert minimum <= maximum\n\n x = [a[name] for a in attributes]\n assert numpy.allclose([min(x), max(x)],\n [minimum, maximum],\n rtol=1.0e-12, atol=1.0e-12)\n\n elif layername == 'lembang_schools.shp':\n minimum, maximum = L.get_extrema('FLOOR_AREA')\n assert minimum == maximum # All identical\n assert maximum == 250\n\n try:\n L.get_extrema('NONEXISTING_ATTRIBUTE_NAME_8462')\n except AssertionError:\n pass\n else:\n msg = ('Non existing attribute name should have '\n 'raised AssertionError')\n raise Exception(msg)\n\n try:\n L.get_extrema()\n except RuntimeError:\n pass\n else:\n msg = ('Missing attribute name should have '\n 'raised RuntimeError')\n raise Exception(msg)\n\n def test_raster_extrema(self):\n \"\"\"Raster extrema (including NAN's) are correct.\n \"\"\"\n\n for rastername in ['Earthquake_Ground_Shaking_clip.tif',\n 'Population_2010_clip.tif',\n 'shakemap_padang_20090930.asc',\n 'population_padang_1.asc',\n 'population_padang_2.asc']:\n\n filename = '%s/%s' % (TESTDATA, rastername)\n R = read_layer(filename)\n\n # Check consistency of raster\n\n # Use numpy to establish the extrema instead of gdal\n minimum, maximum = R.get_extrema()\n\n # Check that arrays with NODATA value replaced by NaN's agree\n A = R.get_data(nan=False)\n B = R.get_data(nan=True)\n\n assert A.dtype == B.dtype\n assert numpy.nanmax(A - B) == 0\n assert numpy.nanmax(B - A) == 0\n assert numpy.nanmax(numpy.abs(A - B)) == 0\n\n # Check that extrema are OK\n assert numpy.allclose(maximum, numpy.max(A[:]))\n assert numpy.allclose(maximum, numpy.nanmax(B[:]))\n assert numpy.allclose(minimum, numpy.nanmin(B[:]))\n\n # Check that nodata can be replaced by 0.0\n C = R.get_data(nan=0.0)\n msg = '-9999 should have been replaced by 0.0 in %s' % rastername\n assert min(C.flat[:]) != -9999, msg\n\n def test_bins(self):\n \"\"\"Linear and quantile bins are correct\n \"\"\"\n\n for filename in ['%s/population_padang_1.asc' % TESTDATA,\n '%s/test_grid.asc' % TESTDATA]:\n\n R = read_layer(filename)\n min, max = R.get_extrema()\n\n for N in [2, 3, 5, 7, 10, 16]:\n linear_intervals = R.get_bins(N=N, quantiles=False)\n\n assert linear_intervals[0] == min\n assert linear_intervals[-1] == max\n\n d = (max - min) / N\n for i in range(N):\n assert numpy.allclose(linear_intervals[i], min + i * d)\n\n quantiles = R.get_bins(N=N, quantiles=True)\n A = R.get_data(nan=True).flat[:]\n\n mask = numpy.logical_not(numpy.isnan(A)) # Omit NaN's\n l1 = len(A)\n A = A.compress(mask)\n l2 = len(A)\n\n if filename == '%s/test_grid.asc' % TESTDATA:\n # Check that NaN's were removed\n assert l1 == 35\n assert l2 == 30\n\n # Assert that there are no NaN's\n assert not numpy.alltrue(numpy.isnan(A))\n\n number_of_elements = len(A)\n average_elements_per_bin = number_of_elements / N\n\n # Count elements in each bin and check\n i0 = quantiles[0]\n for i1 in quantiles[1:]:\n count = 
numpy.sum((i0 < A) & (A < i1))\n if i0 == quantiles[0]:\n refcount = count\n\n if i1 < quantiles[-1]:\n # Number of elements in each bin must vary by no\n # more than 1\n assert abs(count - refcount) <= 1\n assert abs(count - average_elements_per_bin) <= 3\n else:\n # The last bin is allowed vary by more\n pass\n\n i0 = i1\n\n def test_get_bounding_box(self):\n \"\"\"Bounding box is correctly extracted from file.\n\n # Reference data:\n gdalinfo Earthquake_Ground_Shaking_clip.tif\n Driver: GTiff/GeoTIFF\n Files: Earthquake_Ground_Shaking_clip.tif\n Size is 345, 263\n Coordinate System is:\n GEOGCS[\"WGS 84\",\n DATUM[\"WGS_1984\",\n SPHEROID[\"WGS 84\",6378137,298.2572235630016,\n AUTHORITY[\"EPSG\",\"7030\"]],\n AUTHORITY[\"EPSG\",\"6326\"]],\n PRIMEM[\"Greenwich\",0],\n UNIT[\"degree\",0.0174532925199433],\n AUTHORITY[\"EPSG\",\"4326\"]]\n Origin = (99.364169565217395,-0.004180608365019)\n Pixel Size = (0.008339130434783,-0.008361216730038)\n Metadata:\n AREA_OR_POINT=Point\n TIFFTAG_XRESOLUTION=1\n TIFFTAG_YRESOLUTION=1\n TIFFTAG_RESOLUTIONUNIT=1 (unitless)\n Image Structure Metadata:\n COMPRESSION=LZW\n INTERLEAVE=BAND\n Corner Coordinates:\n Upper Left ( 99.3641696, -0.0041806) ( 99d21'51.01\"E, 0d 0'15.05\"S)\n Lower Left ( 99.3641696, -2.2031806) ( 99d21'51.01\"E, 2d12'11.45\"S)\n Upper Right ( 102.2411696, -0.0041806) (102d14'28.21\"E, 0d 0'15.05\"S)\n Lower Right ( 102.2411696, -2.2031806) (102d14'28.21\"E, 2d12'11.45\"S)\n Center ( 100.8026696, -1.1036806) (100d48'9.61\"E, 1d 6'13.25\"S)\n Band 1 Block=256x256 Type=Float64, ColorInterp=Gray\n\n\n Note post gdal 1.8 it is\n Upper Left ( 99.3600000, 0.0000000) ( 99d21'36.00\"E, 0d 0' 0.01\"N)\n Lower Left ( 99.3600000, -2.1990000) ( 99d21'36.00\"E, 2d11'56.40\"S)\n Upper Right ( 102.2370000, 0.0000000) (102d14'13.20\"E, 0d 0' 0.01\"N)\n Lower Right ( 102.2370000, -2.1990000) (102d14'13.20\"E, 2d11'56.40\"S)\n Center ( 100.7985000, -1.0995000) (100d47'54.60\"E, 1d 5'58.20\"S)\n \"\"\"\n\n # Note there are two possible correct values of bbox depending on\n # the version of gdal:\n # http://trac.osgeo.org/gdal/wiki/rfc33_gtiff_pixelispoint\n\n # Get gdal version number\n x = gdal.VersionInfo('').split()\n y = x[1].split('.')\n z = ''.join(y)[:-1] # Turn into number and strip trailing comma\n\n # Reference bbox for vector data\n ref_bbox = {'tsunami_exposure_BB.shp': [150.124,\n -35.7856,\n 150.295,\n -35.6546]}\n\n # Select correct reference bbox for rasters\n if float(z) < 170:\n ref_bbox['Earthquake_Ground_Shaking_clip.tif'] = [99.3641696,\n -2.2031806,\n 102.2411696,\n -0.0041806]\n else:\n ref_bbox['Earthquake_Ground_Shaking_clip.tif'] = [99.36,\n -2.199,\n 102.237,\n 0.0]\n\n for filename in ['Earthquake_Ground_Shaking_clip.tif',\n 'tsunami_exposure_BB.shp']:\n bbox = get_bounding_box(os.path.join(TESTDATA, filename))\n msg = ('Got bbox %s from filename %s, but expected %s '\n % (str(bbox), filename, str(ref_bbox[filename])))\n assert numpy.allclose(bbox, ref_bbox[filename]), msg\n\n def test_layer_API(self):\n \"\"\"Vector and Raster instances have a similar API\n \"\"\"\n\n # Exceptions\n exclude = ['get_topN', 'get_bins',\n 'get_geotransform',\n 'get_nodata_value',\n 'get_attribute_names',\n 'get_resolution']\n\n V = Vector() # Empty vector instance\n R = Raster() # Empty raster instance\n\n assert same_API(V, R, exclude=exclude)\n\n for layername in ['lembang_schools.shp',\n 'Lembang_Earthquake_Scenario.asc']:\n\n filename = '%s/%s' % (TESTDATA, layername)\n L = read_layer(filename)\n\n assert same_API(L, V, 
exclude=exclude)\n assert same_API(L, R, exclude=exclude)\n\n def test_keywords_file(self):\n \"\"\"Keywords can be written and read\n \"\"\"\n\n kwd_filename = unique_filename(suffix='.keywords')\n keywords = {'caption': 'Describing the layer',\n 'category': 'impact',\n 'subcategory': 'flood',\n 'layer': None,\n 'with spaces': 'trailing_ws '}\n\n write_keywords(keywords, kwd_filename)\n msg = 'Keywords file %s was not created' % kwd_filename\n assert os.path.isfile(kwd_filename), msg\n\n x = read_keywords(kwd_filename)\n os.remove(kwd_filename)\n\n assert isinstance(x, dict)\n\n # Check keyword names\n for key in x:\n msg = 'Read unexpected key %s' % key\n assert key in keywords, msg\n\n for key in keywords:\n msg = 'Expected key %s was not read from %s' % (key,\n kwd_filename)\n assert key in x, msg\n\n # Check keyword values\n for key in keywords:\n refval = keywords[key]\n newval = x[key]\n\n if refval is None:\n assert newval is None\n else:\n msg = ('Expected value %s was not read from %s. '\n 'I got %s' % (refval, kwd_filename, newval))\n assert refval.strip() == newval, msg\n\n # Check catching of wrong extension\n kwd_filename = unique_filename(suffix='.xxxx')\n try:\n write_keywords(keywords, kwd_filename)\n except:\n pass\n else:\n msg = 'Should have raised assertion error for wrong extension'\n raise Exception(msg)\n\n def test_empty_keywords_file(self):\n \"\"\"Empty keywords can be handled\n \"\"\"\n\n kwd_filename = unique_filename(suffix='.keywords')\n write_keywords({}, kwd_filename)\n\n msg = 'Keywords file %s was not created' % kwd_filename\n assert os.path.isfile(kwd_filename), msg\n\n x = read_keywords(kwd_filename)\n os.remove(kwd_filename)\n\n assert isinstance(x, dict)\n assert len(x) == 0\n\n def test_keywords_with_colon(self):\n \"\"\"Keywords and values with colons raise error messages\n \"\"\"\n\n # Colon in key\n kwd_filename = unique_filename(suffix='.keywords')\n keywords = {'with_a_colon:in_it': 'value'} # This one is illegal\n\n try:\n write_keywords(keywords, kwd_filename)\n except AssertionError:\n pass\n else:\n msg = 'Colon in keywords key %s was not caught' % keywords\n raise Exception(msg)\n\n # Colon in value\n kwd_filename = unique_filename(suffix='.keywords')\n keywords = {'with_a_colon': 'take: that!'} # This one is illegal\n\n try:\n write_keywords(keywords, kwd_filename)\n except AssertionError:\n pass\n else:\n msg = 'Colon in keywords value %s was not caught' % keywords\n raise Exception(msg)\n\n def test_bounding_box_conversions(self):\n \"\"\"Bounding boxes can be converted between list and string\n \"\"\"\n\n # Good ones\n for x in [[105, -7, 108, -5],\n [106.5, -6.5, 107, -6],\n [94.972335, -11.009721, 141.014, 6.073612333333],\n [105.3, -8.5, 110.0, -5.5],\n [105.6, -7.8, 110.5, -5.1]]:\n bbox_string = bboxlist2string(x)\n bbox_list = bboxstring2list(bbox_string)\n\n assert numpy.allclose(x, bbox_list, rtol=1.0e-6, atol=1.0e-6)\n\n for x in ['105,-7,108,-5',\n '106.5, -6.5, 107,-6',\n '94.972335,-11.009721,141.014,6.073612333333']:\n bbox_list = bboxstring2list(x)\n\n # Check that numbers are numerically consistent\n assert numpy.allclose([float(z) for z in x.split(',')],\n bbox_list, rtol=1.0e-6, atol=1.0e-6)\n\n # Bad ones\n for bbox in [[105, -7, 'x', -5],\n [106.5, -6.5, -6],\n [94.972335, 0, -11.009721, 141.014, 6]]:\n try:\n bbox_string = bboxlist2string(bbox)\n except:\n pass\n else:\n msg = 'Should have raised exception'\n raise Exception(msg)\n\n for x in ['106.5,-6.5,-6',\n '106.5,-6.5,-6,4,10',\n '94.972335,x,141.014,6.07']:\n 
try:\n bbox_list = bboxstring2list(x)\n except:\n pass\n else:\n msg = 'Should have raised exception: %s' % x\n raise Exception(msg)\n\n def test_bounding_box_intersection(self):\n \"\"\"Intersections of bounding boxes work\n \"\"\"\n\n west_java = [105, -7, 108, -5]\n jakarta = [106.5, -6.5, 107, -6]\n\n # Test commutative law\n assert numpy.allclose(bbox_intersection(west_java, jakarta),\n bbox_intersection(jakarta, west_java))\n\n # Test inclusion\n assert numpy.allclose(bbox_intersection(west_java, jakarta), jakarta)\n\n # Realistic ones\n bbox1 = [94.972335, -11.009721, 141.014, 6.073612333333]\n bbox2 = [105.3, -8.5, 110.0, -5.5]\n bbox3 = [105.6, -7.8, 110.5, -5.1]\n\n ref1 = [max(bbox1[0], bbox2[0]),\n max(bbox1[1], bbox2[1]),\n min(bbox1[2], bbox2[2]),\n min(bbox1[3], bbox2[3])]\n assert numpy.allclose(bbox_intersection(bbox1, bbox2), ref1)\n assert numpy.allclose(bbox_intersection(bbox1, bbox2), bbox2)\n\n ref2 = [max(bbox3[0], bbox2[0]),\n max(bbox3[1], bbox2[1]),\n min(bbox3[2], bbox2[2]),\n min(bbox3[3], bbox2[3])]\n assert numpy.allclose(bbox_intersection(bbox3, bbox2), ref2)\n assert numpy.allclose(bbox_intersection(bbox2, bbox3), ref2)\n\n # Multiple boxes\n assert numpy.allclose(bbox_intersection(bbox1, bbox2, bbox3),\n bbox_intersection(ref1, ref2))\n\n assert numpy.allclose(bbox_intersection(bbox1, bbox2, bbox3,\n west_java, jakarta),\n jakarta)\n\n # From actual example\n b1 = [94.972335000000001, -11.009721000000001,\n 141.014002, 6.0736119999999998]\n b2 = (95.059660952000002, -10.997409961000001,\n 141.00132578099999, 5.9109226959999983)\n b3 = (94.972335000000001, -11.009721000000001,\n 141.0140016666665, 6.0736123333332639)\n\n res = bbox_intersection(b1, b2, b3)\n\n # Empty intersection should return None\n assert bbox_intersection(bbox2, [50, 2, 53, 4]) is None\n\n # Deal with invalid boxes\n try:\n bbox_intersection(bbox1, [53, 2, 40, 4])\n except AssertionError:\n pass\n else:\n msg = 'Should have raised exception'\n raise Exception(msg)\n\n try:\n bbox_intersection(bbox1, [50, 7, 53, 4])\n except AssertionError:\n pass\n else:\n msg = 'Should have raised exception'\n raise Exception(msg)\n\n try:\n bbox_intersection(bbox1, 'blko ho skrle')\n except AssertionError:\n pass\n else:\n msg = 'Should have raised exception'\n raise Exception(msg)\n\n try:\n bbox_intersection(bbox1)\n except AssertionError:\n pass\n else:\n msg = 'Should have raised exception'\n raise Exception(msg)\n\n try:\n bbox_intersection('')\n except AssertionError:\n pass\n else:\n msg = 'Should have raised exception'\n raise Exception(msg)\n\n try:\n bbox_intersection()\n except AssertionError:\n pass\n else:\n msg = 'Should have raised exception'\n raise Exception(msg)\n\n def test_minimal_bounding_box(self):\n \"\"\"Bounding box minimal size can be controlled\n \"\"\"\n\n big = (95.06, -11.0, 141.0, 5.9)\n mid = [103.28, -8.46, 109.67, -4.68]\n sml = (106.818998, -6.18585170, 106.82264510, -6.1810)\n\n min_res = 0.008333333333000\n eps = 1.0e-4\n\n # Check that sml box is actually too small\n assert sml[2] - sml[0] < min_res\n assert sml[3] - sml[1] < min_res\n\n for bbox in [big, mid, sml]:\n # Calculate minimal bounding box\n adjusted_bbox = minimal_bounding_box(bbox, min_res, eps=eps)\n\n # Check that adjusted box exceeds minimal resolution\n assert adjusted_bbox[2] - adjusted_bbox[0] > min_res\n assert adjusted_bbox[3] - adjusted_bbox[1] > min_res\n\n # Check that if box was adjusted eps was applied\n if bbox[2] - bbox[0] <= min_res:\n assert numpy.allclose(adjusted_bbox[2] - 
adjusted_bbox[0],\n min_res + (2 * eps))\n\n if bbox[3] - bbox[1] <= min_res:\n assert numpy.allclose(adjusted_bbox[3] - adjusted_bbox[1],\n min_res + (2 * eps))\n\n # Check that input box was not changed\n assert adjusted_bbox is not bbox\n\n def test_buffered_bounding_box(self):\n \"\"\"Bounding box can be buffered\n \"\"\"\n\n big = (95.06, -11.0, 141.0, 5.9)\n mid = [103.28, -8.46, 109.67, -4.68]\n sml = (106.818998, -6.18585170, 106.82264510, -6.1810)\n\n for bbox in [big, mid, sml]:\n\n # Set common resolution which is bigger than the smallest box\n resolution = (0.1, 0.2)\n\n dx = bbox[2] - bbox[0]\n dy = bbox[3] - bbox[1]\n\n # Calculate minimal bounding box\n adjusted_bbox = buffered_bounding_box(bbox, resolution)\n\n # Check that adjusted box exceeds minimal resolution\n assert adjusted_bbox[2] - adjusted_bbox[0] > 2 * resolution[0]\n assert adjusted_bbox[3] - adjusted_bbox[1] > 2 * resolution[1]\n\n # Check that input box was not changed\n assert adjusted_bbox is not bbox\n\n def test_array2wkt(self):\n \"\"\"Conversion to wkt data works\n\n It should create something like this\n 'POLYGON((0 1, 2 3, 4 5, 6 7, 8 9))'\n \"\"\"\n\n # Arrays first\n A = numpy.arange(10)\n A = A.reshape(5, 2)\n\n wkt = array2wkt(A, geom_type='POLYGON')\n assert wkt.startswith('POLYGON((')\n fields = wkt[9:-2].split(',')\n for i, field in enumerate(fields):\n x, y = field.split()\n assert numpy.allclose(A[i, :], [float(x), float(y)])\n\n # Then list\n wkt = array2wkt(A.tolist(), geom_type='POLYGON')\n assert wkt.startswith('POLYGON((')\n fields = wkt[9:-2].split(',')\n for i, field in enumerate(fields):\n x, y = field.split()\n assert numpy.allclose(A[i, :], [float(x), float(y)])\n\n # Then a linestring example (note one less bracket)\n wkt = array2wkt(A, geom_type='LINESTRING')\n assert wkt.startswith('LINESTRING(')\n fields = wkt[11:-1].split(',')\n for i, field in enumerate(fields):\n x, y = field.split()\n assert numpy.allclose(A[i, :], [float(x), float(y)])\n\n def test_polygon_area(self):\n \"\"\"Polygon areas are computed correctly\n \"\"\"\n\n # Create closed simple polygon (counter clock wise)\n P = numpy.array([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]])\n A = calculate_polygon_area(P)\n msg = 'Calculated area was %f, expected 1.0 deg^2' % A\n assert numpy.allclose(A, 1), msg\n\n # Create closed simple polygon (clock wise)\n P = numpy.array([[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]])\n A = calculate_polygon_area(P)\n msg = 'Calculated area was %f, expected 1.0 deg^2' % A\n assert numpy.allclose(A, 1), msg\n\n A = calculate_polygon_area(P, signed=True)\n msg = 'Calculated signed area was %f, expected -1.0 deg^2' % A\n assert numpy.allclose(A, -1), msg\n\n # Not starting at zero\n # Create closed simple polygon (counter clock wise)\n P = numpy.array([[168, -2], [169, -2], [169, -1],\n [168, -1], [168, -2]])\n A = calculate_polygon_area(P)\n\n msg = 'Calculated area was %f, expected 1.0 deg^2' % A\n assert numpy.allclose(A, 1), msg\n\n # Realistic polygon\n filename = '%s/%s' % (TESTDATA, 'test_polygon.shp')\n layer = read_layer(filename)\n geometry = layer.get_geometry()\n attributes = layer.get_data()\n\n P = geometry[0]\n A = calculate_polygon_area(P)\n\n # Verify against area reported by qgis (only three decimals)\n qgis_area = 0.003\n assert numpy.allclose(A, qgis_area, atol=1.0e-3)\n\n # Verify against area reported by ESRI ARC (very good correspondence)\n esri_area = 2.63924787273461e-3\n assert numpy.allclose(A, esri_area, rtol=0, atol=1.0e-10)\n\n def test_polygon_centroids(self):\n 
\"\"\"Polygon centroids are computed correctly\n \"\"\"\n\n # Create closed simple polygon (counter clock wise)\n P = numpy.array([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]])\n C = calculate_polygon_centroid(P)\n\n msg = ('Calculated centroid was (%f, %f), expected '\n '(0.5, 0.5)' % tuple(C))\n assert numpy.allclose(C, [0.5, 0.5]), msg\n\n # Create closed simple polygon (clock wise)\n # FIXME (Ole): Not sure whether to raise an exception or\n # to return absolute value in this case\n P = numpy.array([[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]])\n C = calculate_polygon_centroid(P)\n\n msg = ('Calculated centroid was (%f, %f), expected '\n '(0.5, 0.5)' % tuple(C))\n assert numpy.allclose(C, [0.5, 0.5]), msg\n\n # Not starting at zero\n # Create closed simple polygon (counter clock wise)\n P = numpy.array([[168, -2], [169, -2], [169, -1],\n [168, -1], [168, -2]])\n C = calculate_polygon_centroid(P)\n\n msg = ('Calculated centroid was (%f, %f), expected '\n '(168.5, -1.5)' % tuple(C))\n assert numpy.allclose(C, [168.5, -1.5]), msg\n\n # Realistic polygon\n filename = '%s/%s' % (TESTDATA, 'test_polygon.shp')\n layer = read_layer(filename)\n geometry = layer.get_geometry()\n attributes = layer.get_data()\n\n P = geometry[0]\n C = calculate_polygon_centroid(P)\n\n # Check against reference centroid\n reference_centroid = [106.7036938, -6.134533855] # From qgis\n assert numpy.allclose(C, reference_centroid, rtol=1.0e-8)\n\n # Store centroid to file (to e.g. check with qgis)\n out_filename = unique_filename(prefix='test_centroid', suffix='.shp')\n V = Vector(data=None,\n projection=DEFAULT_PROJECTION,\n geometry=[C],\n name='Test centroid')\n V.write_to_file(out_filename)\n\n # Another realistic polygon\n P = numpy.array([[106.7922547, -6.2297884],\n [106.7924589, -6.2298087],\n [106.7924538, -6.2299127],\n [106.7922547, -6.2298899],\n [106.7922547, -6.2297884]])\n\n C = calculate_polygon_centroid(P)\n\n # Check against reference centroid from qgis\n reference_centroid = [106.79235602697445, -6.229849764722536]\n msg = 'Got %s but expected %s' % (str(C), str(reference_centroid))\n assert numpy.allclose(C, reference_centroid, rtol=1.0e-8), msg\n\n # Store centroid to file (to e.g. 
check with qgis)\n out_filename = unique_filename(prefix='test_centroid', suffix='.shp')\n V = Vector(data=None,\n projection=DEFAULT_PROJECTION,\n geometry=[C],\n name='Test centroid')\n V.write_to_file(out_filename)\n\n def test_geotransform2bbox(self):\n \"\"\"Bounding box can be extracted from geotransform\n \"\"\"\n\n M = 5\n N = 10\n for gt in GEOTRANSFORMS:\n bbox = geotransform2bbox(gt, M, N)\n\n # FIXME: Need better tests here, but this is better than nothing\n\n # Lower bounds\n assert bbox[0] == gt[0]\n\n # Upper bounds\n assert bbox[3] == gt[3]\n\n def test_geotransform2resolution(self):\n \"\"\"Resolution can be extracted from geotransform\n \"\"\"\n\n for gt in GEOTRANSFORMS:\n res = geotransform2resolution(gt, isotropic=False)\n assert len(res) == 2\n assert numpy.allclose(res[0], gt[1], rtol=0, atol=1.0e-12)\n assert numpy.allclose(res[1], - gt[5], rtol=0, atol=1.0e-12)\n\n res = geotransform2resolution(gt, isotropic=True)\n assert numpy.allclose(res, gt[1], rtol=0, atol=1.0e-12)\n assert numpy.allclose(res, - gt[5], rtol=0, atol=1.0e-12)\n\nif __name__ == '__main__':\n suite = unittest.makeSuite(Test_IO, 'test')\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite)\n" }, { "alpha_fraction": 0.6597014665603638, "alphanum_fraction": 0.683582067489624, "avg_line_length": 46.57143020629883, "blob_id": "9988d32f822ef17342d0f3b0e8d8c9a2e1ec9786", "content_id": "5170284add7188dcedcaa6465ac0963b358e82b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 335, "license_type": "no_license", "max_line_length": 77, "num_lines": 7, "path": "/docs/development/style-guides.rst", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "Style guides and other resources\n================================\n\n* Python style guide: http://www.python.org/dev/peps/pep-0008\n* Python documentation guide: http://www.python.org/dev/peps/pep-0257\n* Git commands: http://www.kernel.org/pub/software/scm/git/docs/everyday.html\n* Git guide: http://spheredev.org/wiki/Git_for_the_lazy\n\n\n" }, { "alpha_fraction": 0.5291599631309509, "alphanum_fraction": 0.5430711507797241, "avg_line_length": 31.224138259887695, "blob_id": "7a136d168235d0789b8feca3947dce5f9697253b", "content_id": "3b184cc9206d9cb7811775eb3a8abec8d6272590", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1869, "license_type": "no_license", "max_line_length": 74, "num_lines": 58, "path": "/impact/plugins/earthquake/allen_fatality_model.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "from impact.plugins.core import FunctionProvider\nfrom impact.storage.raster import Raster\nimport numpy\n\n\nclass EarthquakeFatalityFunction(FunctionProvider):\n \"\"\"Risk plugin for earthquake damage\n\n :author Allen\n :rating 1\n :param requires category=='hazard' and \\\n subcategory.startswith('earthquake') and \\\n layer_type=='raster'\n :param requires category=='exposure' and \\\n subcategory.startswith('population') and \\\n layer_type=='raster'\n \"\"\"\n\n @staticmethod\n def run(layers,\n a=0.97429, b=11.037):\n \"\"\"Risk plugin for earthquake fatalities\n\n Input\n layers: List of layers expected to contain\n H: Raster layer of MMI ground shaking\n P: Raster layer of population data on the same grid as H\n \"\"\"\n\n # Identify input layers\n intensity = layers[0]\n population = layers[1]\n\n # Extract data\n H = intensity.get_data(nan=0)\n P = population.get_data(nan=0)\n\n # Calculate 
impact\n F = 10 ** (a * H - b) * P\n\n # Generate text with result for this study\n count = numpy.nansum(F.flat)\n total = numpy.nansum(P.flat)\n\n # Create report\n caption = ('<table border=\"0\" width=\"320px\">'\n ' <tr><td>%s&#58;</td><td>%i</td></tr>'\n ' <tr><td>%s&#58;</td><td>%i</td></tr>'\n '</table>' % ('Jumlah Penduduk', int(total),\n 'Perkiraan Orang Meninggal', int(count)))\n\n # Create new layer and return\n R = Raster(F,\n projection=population.get_projection(),\n geotransform=population.get_geotransform(),\n name='Estimated fatalities',\n keywords={'caption': caption})\n return R\n" }, { "alpha_fraction": 0.7811965942382812, "alphanum_fraction": 0.788034200668335, "avg_line_length": 28.149999618530273, "blob_id": "ed8af59dab41269c4f1fa9faabd85db0531b8378", "content_id": "b261b804309d00cbfb118c734c676ec2ffda6607", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 585, "license_type": "no_license", "max_line_length": 133, "num_lines": 20, "path": "/docs/intro/glossary.rst", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "Glossary\n--------\n\nMagnitude\n The energy released at the source of the earthquake.\n\nHazard Level\n Ground acceleration, Maximum water depth, Ash Thickness,Acceleration at selected frequencies or modes are examples of Hazard levels.\n\nExposure Level\n Population density or Infrastructures (house of building type or dollars per sqm)\n\nImpact\n Number of fatalities / Dollar Losses / Buildings Collapsed for example\n\nRisk\n Impact with an associated probability - how bad and how often\n\nReturn Period\n Inverse of probability. e.g. 100 year flood - flood event of probability of 1% per year\n\n\n" }, { "alpha_fraction": 0.5387499928474426, "alphanum_fraction": 0.5610937476158142, "avg_line_length": 33.40860366821289, "blob_id": "e6321d6982599b18b9eef42790da440034770197", "content_id": "dd3c20ee9cfd9f7cecfc68f99337a2fc77f98800", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6400, "license_type": "no_license", "max_line_length": 195, "num_lines": 186, "path": "/impact/plugins/earthquake/usgs_fatality_model.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "from impact.plugins.core import FunctionProvider\nfrom impact.storage.raster import Raster\n\nimport scipy\nimport scipy.stats\nimport numpy\n\n\nclass USGSFatalityFunction(FunctionProvider):\n \"\"\"Risk plugin for earthquake damage based on empirical results\n\n :author Hadi Ghasemi\n :rating 2\n\n :param requires category == 'hazard' and \\\n subcategory == 'earthquake' and \\\n layer_type == 'raster'\n\n :param requires category == 'exposure' and \\\n subcategory == 'population' and \\\n layer_type == 'raster' and \\\n disabled == 'notinuse'\n \"\"\"\n\n @staticmethod\n def run(layers,\n teta=14.05, beta=0.17, zeta=2.15):\n \"\"\"Risk plugin for earthquake fatalities\n\n Input\n H: Numerical array of hazard data\n E: Numerical array of exposure data\n\n Algorithm and coefficients are from:\n\n An Empirical Model for Global Earthquake Fatality Estimation.\n Kishor Jaiswal and David Wald.\n Earthquake Spectra, Volume 26, No. 
4, pages 1017-1037, November 2010.\n\n\n teta=14.05, beta=0.17, zeta=2.1 # Coefficients for Indonesia.\n\n\n \"\"\"\n\n # Identify input layers\n intensity = layers[0]\n population = layers[1]\n\n print intensity.get_resolution()\n print population.get_resolution()\n\n # Extract data\n H = intensity.get_data(nan=0) # Ground Shaking\n P = population.get_data(nan=0) # Population Density\n\n import cPickle\n name = intensity.get_name()\n print name\n fid = open('/home/nielso/population_%s.pck' % name, 'wb')\n cPickle.dump(P, fid)\n fid.close()\n\n fid = open('/home/nielso/intensity_%s.pck' % name, 'wb')\n cPickle.dump(H, fid)\n fid.close()\n\n # Calculate population affected by each MMI level\n mmi_range = range(2, 10)\n number_of_people_affected = {}\n for mmi in mmi_range:\n mask = numpy.logical_and(mmi - 0.5 < H,\n H <= mmi + 0.5)\n I = numpy.where(mask, P, 0)\n\n # Generate text with result for this study\n number_of_people_affected[mmi] = numpy.nansum(I.flat)\n\n # Calculate impact according to equation (1) in the Kishor and Wald 2010\n logHazard = 1 / beta * scipy.log(H / teta)\n\n # Convert array to be standard floats expected by cdf\n arrayout = numpy.array([[float(value) for value in row]\n for row in logHazard])\n F = scipy.stats.norm.cdf(arrayout * P)\n\n # Stats\n total = numpy.nansum(P.flat)\n fatalities = numpy.nansum(F)\n print 'Total', total\n print 'Estimated fatalities', fatalities\n print 'Min', numpy.amin(F)\n print 'Max', numpy.amax(F)\n\n # Generate text with result for this study\n caption = generate_exposure_table(mmi_range,\n number_of_people_affected)\n caption += generate_fatality_table(fatalities)\n\n # Create new layer and return\n R = Raster(F,\n projection=population.get_projection(),\n geotransform=population.get_geotransform(),\n keywords={'caption': caption},\n name='Estimated fatalities')\n return R\n\n def generate_style(self, data):\n \"\"\"Generates and SLD file based on the data values\n \"\"\"\n\n s = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<sld:StyledLayerDescriptor xmlns=\"http://www.opengis.net/sld\" xmlns:sld=\"http://www.opengis.net/sld\" xmlns:ogc=\"http://www.opengis.net/ogc\" xmlns:gml=\"http://www.opengis.net/gml\" version=\"1.0.0\">\n <sld:NamedLayer>\n <sld:Name>Estimated Fatalities</sld:Name>\n <sld:UserStyle>\n <sld:Name>Estimated Fatalities</sld:Name>\n <sld:Title>Estimated Earthquake Fatalities</sld:Title>\n <sld:Abstract>Estimated Fatalities from ground shaking</sld:Abstract>\n <sld:FeatureTypeStyle>\n <sld:Name>Estimated Fatalities</sld:Name>\n <sld:Rule>\n <sld:RasterSymbolizer>\n <sld:Geometry>\n <ogc:PropertyName>geom</ogc:PropertyName>\n </sld:Geometry>\n <sld:ChannelSelection>\n <sld:GrayChannel>\n <sld:SourceChannelName>1</sld:SourceChannelName>\n </sld:GrayChannel>\n </sld:ChannelSelection>\n <sld:ColorMap>\n <sld:ColorMapEntry color=\"#ffffff\" opacity=\"0\" quantity=\"-9999.0\"/>\n <sld:ColorMapEntry color=\"#38A800\" opacity=\"0\" quantity=\"0.01\"/>\n <sld:ColorMapEntry color=\"#38A800\" quantity=\"0.02\"/>\n <sld:ColorMapEntry color=\"#79C900\" quantity=\"0.05\"/>\n <sld:ColorMapEntry color=\"#CEED00\" quantity=\"0.1\"/>\n <sld:ColorMapEntry color=\"#FFCC00\" quantity=\"0.2\"/>\n <sld:ColorMapEntry color=\"#FF6600\" quantity=\"0.3\"/>\n <sld:ColorMapEntry color=\"#FF0000\" quantity=\"0.5\"/>\n <sld:ColorMapEntry color=\"#7A0000\" quantity=\"0.9\"/>\n <sld:ColorMapEntry color=\"#DDDDDD\" quantity=\"5.0\"/>\n <sld:ColorMapEntry color=\"#FFFFFF\" quantity=\"10.0\"/>\n </sld:ColorMap>\n </sld:RasterSymbolizer>\n 
</sld:Rule>\n </sld:FeatureTypeStyle>\n </sld:UserStyle>\n </sld:NamedLayer>\n</sld:StyledLayerDescriptor>\n\n \"\"\"\n\n return s\n\n\ndef generate_exposure_table(mmi_range,\n number_of_people_affected):\n \"\"\"Helper to make html report\n \"\"\"\n\n header = 'Jumlah Orang yg terkena dampak (x1000)'\n caption = ('<font size=\"3\"><table border=\"0\" width=\"400px\">'\n ' <tr><td><b>MMI</b></td><td><b>%s</b></td></tr>'\n % header)\n\n for mmi in mmi_range:\n caption += (' <tr><td>%i&#58;</td><td>%i</td></tr>'\n % (mmi,\n number_of_people_affected[mmi] / 1000))\n caption += '<tr></tr>'\n caption += '</table></font>'\n\n return caption\n\n\ndef generate_fatality_table(fatalities):\n \"\"\"Helper to make html report\n \"\"\"\n\n caption = ('<br>'\n '<font size=\"3\"><table border=\"0\" width=\"300px\">'\n ' <tr><td><b>Jumlah Perkiraan Kematian</b></td>'\n ' <td><b>%i</b></td></tr>'\n '</table></font>' % fatalities)\n return caption\n" }, { "alpha_fraction": 0.6610342860221863, "alphanum_fraction": 0.6774193644523621, "avg_line_length": 29.515625, "blob_id": "b91c2e7b496e6f7ff83aeff8dd8cdd73703b4f55", "content_id": "784d632ef9567791981583d2b77d9338dfba0086", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1953, "license_type": "no_license", "max_line_length": 77, "num_lines": 64, "path": "/impact/models.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "from __future__ import division\nfrom django.db import models\nfrom django.contrib.auth.models import User\nimport datetime\n\n\nclass Calculation(models.Model):\n \"\"\"Calculation model\n \"\"\"\n\n user = models.ForeignKey(User)\n success = models.BooleanField()\n run_date = models.DateTimeField()\n run_duration = models.FloatField()\n impact_function = models.CharField(max_length=255, null=True, blank=True)\n impact_function_source = models.TextField()\n exposure_server = models.URLField(null=True, blank=True)\n exposure_layer = models.CharField(max_length=255, null=True, blank=True)\n hazard_server = models.URLField(null=True, blank=True)\n hazard_layer = models.CharField(max_length=255, null=True, blank=True)\n bbox = models.CharField(max_length=255, null=True, blank=True)\n errors = models.TextField()\n stacktrace = models.TextField(null=True, blank=True)\n layer = models.CharField(max_length=255, null=True, blank=True)\n\n @property\n def url(self):\n return self.layer.url\n\n def get_absolute_url(self):\n return self.layer.get_absolute_url()\n\n def __unicode__(self):\n if self.success:\n name = 'Sucessful Calculation'\n else:\n name = 'Failed Calculation'\n return '%s at %s' % (name, self.run_date)\n\n\nclass Server(models.Model):\n name = models.CharField(max_length=255)\n url = models.URLField()\n\n def __unicode__(self):\n return self.name\n\n\nclass Workspace(models.Model):\n user = models.ForeignKey(User)\n servers = models.ManyToManyField(Server)\n\n def __unicode__(self):\n return self.user.username\n\n\ndef duration(sender, **kwargs):\n instance = kwargs['instance']\n now = datetime.datetime.now()\n td = now - instance.run_date\n duration = td.microseconds / 1000000 + td.seconds + td.days * 24 * 3600\n instance.run_duration = round(duration, 2)\n\nmodels.signals.pre_save.connect(duration, sender=Calculation)\n" }, { "alpha_fraction": 0.5405616164207458, "alphanum_fraction": 0.565912663936615, "avg_line_length": 38.75193786621094, "blob_id": "91097267a5121da4dc6761ef371c072db7922058", "content_id": "d83ec93d3f3df44821a94ed7b1581c9a4b0c8542", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5128, "license_type": "no_license", "max_line_length": 195, "num_lines": 129, "path": "/impact/plugins/flood/HKV_flood_study.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "import numpy\n\nfrom impact.plugins.core import FunctionProvider\nfrom impact.storage.raster import Raster\n\n\nclass FloodImpactFunction(FunctionProvider):\n \"\"\"Risk plugin for flood impact\n\n :author HKV\n :rating 1\n :param requires category=='hazard' and \\\n subcategory.startswith('flood') and \\\n layer_type=='raster' and \\\n unit=='m'\n :param requires category=='exposure' and \\\n subcategory.startswith('population') and \\\n layer_type=='raster'\n \"\"\"\n\n @staticmethod\n def run(layers):\n \"\"\"Risk plugin for earthquake fatalities\n\n Input\n layers: List of layers expected to contain\n H: Raster layer of flood depth\n P: Raster layer of population data on the same grid as H\n \"\"\"\n\n # Depth above which people are regarded affected [m]\n threshold = 0.1\n thresholds = [0.1, 0.2, 0.3, 0.5, 0.8, 1.0]\n\n # Identify hazard and exposure layers\n inundation = layers[0] # Flood inundation [m]\n population = layers[1] # Population density [people/100000 m^2]\n\n # Extract data as numeric arrays\n D = inundation.get_data(nan=0.0) # Depth\n\n # Calculate impact as population exposed to depths > threshold\n if population.get_resolution(native=True, isotropic=True) < 0.0005:\n # Keep this for backwards compatibility just a little while\n # This uses the original custom population set and\n # serves as a reference\n\n P = population.get_data(nan=0.0) # Population density\n pixel_area = 2500\n I = numpy.where(D > threshold, P, 0) / 100000.0 * pixel_area\n else:\n # This is the new generic way of scaling (issue #168 and #172)\n P = population.get_data(nan=0.0, scaling=True)\n I = numpy.where(D > threshold, P, 0)\n\n # Generate text with result for this study\n number_of_people_affected = numpy.nansum(I.flat)\n caption = ('%i people affected by flood levels greater '\n 'than %i cm' % (number_of_people_affected,\n threshold * 100))\n\n # Create report\n caption = ('<table border=\"0\" width=\"320px\">'\n ' <tr><th><b>%s</b></th><th><b>%s</b></th></th>'\n ' <tr></tr>' % ('Min flood levels', 'People affected'))\n\n counts = []\n for i, threshold in enumerate(thresholds):\n I_tmp = numpy.where(D > threshold, P, 0)\n counts.append(numpy.nansum(I_tmp.flat))\n\n caption += ' <tr><td>%s m</td><td>%i</td></tr>' % (threshold,\n counts[i])\n\n caption += '</table>'\n\n # Create raster object and return\n R = Raster(I,\n projection=inundation.get_projection(),\n geotransform=inundation.get_geotransform(),\n name='People affected',\n keywords={'caption': caption})\n return R\n\n def generate_style(self, data):\n \"\"\"Generates and SLD file based on the data values\n \"\"\"\n\n s = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<sld:StyledLayerDescriptor xmlns=\"http://www.opengis.net/sld\" xmlns:sld=\"http://www.opengis.net/sld\" xmlns:ogc=\"http://www.opengis.net/ogc\" xmlns:gml=\"http://www.opengis.net/gml\" version=\"1.0.0\">\n <sld:NamedLayer>\n <sld:Name>People affected by more than 1m of inundation</sld:Name>\n <sld:UserStyle>\n <sld:Name>People affected by more than 1m of inundation</sld:Name>\n <sld:Title>People Affected By More Than 1m Of Inundation</sld:Title>\n <sld:Abstract>People Affected By More Than 1m Of Inundation</sld:Abstract>\n <sld:FeatureTypeStyle>\n <sld:Name>People affected 
by more than 1m of inundation</sld:Name>\n <sld:Rule>\n <sld:RasterSymbolizer>\n <sld:Geometry>\n <ogc:PropertyName>geom</ogc:PropertyName>\n </sld:Geometry>\n <sld:ChannelSelection>\n <sld:GrayChannel>\n <sld:SourceChannelName>1</sld:SourceChannelName>\n </sld:GrayChannel>\n </sld:ChannelSelection>\n <sld:ColorMap>\n <sld:ColorMapEntry color=\"#ffffff\" opacity=\"0\" quantity=\"-9999.0\"/>\n <sld:ColorMapEntry color=\"#38A800\" opacity=\"0\" quantity=\"2\"/>\n <sld:ColorMapEntry color=\"#38A800\" quantity=\"5\"/>\n <sld:ColorMapEntry color=\"#79C900\" quantity=\"10\"/>\n <sld:ColorMapEntry color=\"#CEED00\" quantity=\"20\"/>\n <sld:ColorMapEntry color=\"#FFCC00\" quantity=\"50\"/>\n <sld:ColorMapEntry color=\"#FF6600\" quantity=\"100\"/>\n <sld:ColorMapEntry color=\"#FF0000\" quantity=\"200\"/>\n <sld:ColorMapEntry color=\"#7A0000\" quantity=\"300\"/>\n </sld:ColorMap>\n </sld:RasterSymbolizer>\n </sld:Rule>\n </sld:FeatureTypeStyle>\n </sld:UserStyle>\n </sld:NamedLayer>\n</sld:StyledLayerDescriptor>\n\n \"\"\"\n\n return s\n" }, { "alpha_fraction": 0.5328233242034912, "alphanum_fraction": 0.535941481590271, "avg_line_length": 34.16520309448242, "blob_id": "fdf1b962f3e972c73e2ed57be7ce55df89358b96", "content_id": "957e4dae69221d1ac00d97215b942d03ba65e3e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24053, "license_type": "no_license", "max_line_length": 78, "num_lines": 684, "path": "/impact/storage/vector.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "\"\"\"Class Vector\n\"\"\"\n\nimport os\nimport numpy\nfrom osgeo import ogr, gdal\nfrom impact.storage.projection import Projection\nfrom impact.storage.utilities import DRIVER_MAP, TYPE_MAP\nfrom impact.storage.utilities import read_keywords\nfrom impact.storage.utilities import write_keywords\nfrom impact.storage.utilities import get_geometry_type\nfrom impact.storage.utilities import is_sequence\nfrom impact.storage.utilities import array2wkt\nfrom impact.storage.utilities import calculate_polygon_centroid\nfrom impact.storage.utilities import geometrytype2string\n\n\n# FIXME (Ole): Consider using pyshp to read and write shapefiles\n# See http://code.google.com/p/pyshp\nclass Vector:\n \"\"\"Class for abstraction of vector data\n \"\"\"\n\n def __init__(self, data=None, projection=None, geometry=None,\n name='Vector layer', keywords=None):\n \"\"\"Initialise object with either geometry or filename\n\n Input\n data: Can be either\n * a filename of a vector file format known to GDAL\n * List of dictionaries of fields associated with\n point coordinates\n * None\n projection: Geospatial reference in WKT format.\n Only used if geometry is provide as a numeric array,\n geometry: A list of either point coordinates or polygons\n name: Optional name for layer.\n Only used if geometry is provide as a numeric array\n keywords: Optional dictionary with keywords that describe the\n layer. 
When the layer is stored, these keywords will\n be written into an associated file with extension\n .keywords.\n\n Keywords can for example be used to display text\n about the layer in a web application.\n\n Note that if data is a filename, all other arguments are ignored\n as they will be inferred from the file.\n\n The geometry type will be inferred from the dimensions of geometry.\n If each entry is one set of coordinates the type will be ogr.wkbPoint,\n if it is an array of coordinates the type will be ogr.wkbPolygon.\n \"\"\"\n\n if data is None and projection is None and geometry is None:\n # Instantiate empty object\n self.name = name\n self.projection = None\n self.geometry = None\n self.geometry_type = None\n self.filename = None\n self.data = None\n self.extent = None\n self.keywords = {}\n return\n\n if isinstance(data, basestring):\n self.read_from_file(data)\n else:\n # Assume that data is provided as sequences provided as\n # arguments to the Vector constructor\n # with extra keyword arguments supplying metadata\n\n self.name = name\n self.filename = None\n\n if keywords is None:\n self.keywords = {}\n else:\n msg = ('Specified keywords must be either None or a '\n 'dictionary. I got %s' % keywords)\n assert isinstance(keywords, dict), msg\n self.keywords = keywords\n\n msg = 'Geometry must be specified'\n assert geometry is not None, msg\n\n msg = 'Geometry must be a sequence'\n assert is_sequence(geometry), msg\n self.geometry = geometry\n\n self.geometry_type = get_geometry_type(geometry)\n\n msg = 'Projection must be specified'\n assert projection is not None, msg\n self.projection = Projection(projection)\n\n self.data = data\n if data is not None:\n msg = 'Data must be a sequence'\n assert is_sequence(data), msg\n\n msg = ('The number of entries in geometry and data '\n 'must be the same')\n assert len(geometry) == len(data), msg\n\n # FIXME: Need to establish extent here\n\n def __str__(self):\n\n g_type_str = geometrytype2string(self.geometry_type)\n return ('Vector data set: %s, %i features, geometry type '\n '%s (%s)' % (self.name,\n len(self),\n str(self.geometry_type),\n g_type_str))\n\n def __len__(self):\n \"\"\"Size of vector layer defined as number of features\n \"\"\"\n\n return len(self.geometry)\n\n def __eq__(self, other, rtol=1.0e-5, atol=1.0e-8):\n \"\"\"Override '==' to allow comparison with other vector objecs\n\n Input\n other: Vector instance to compare to\n rtol, atol: Relative and absolute tolerance.\n See numpy.allclose for details\n \"\"\"\n\n # Check type\n if not isinstance(other, Vector):\n msg = ('Vector instance cannot be compared to %s'\n ' as its type is %s ' % (str(other), type(other)))\n raise TypeError(msg)\n\n # Check projection\n if self.projection != other.projection:\n return False\n\n # Check geometry\n if not numpy.allclose(self.get_geometry(),\n other.get_geometry(),\n rtol=rtol, atol=atol):\n return False\n\n # Check keys\n x = self.get_data()\n y = other.get_data()\n\n for key in x[0]:\n for i in range(len(y)):\n if key not in y[i]:\n return False\n\n for key in y[0]:\n for i in range(len(x)):\n if key not in x[i]:\n return False\n\n # Check data\n for i, a in enumerate(x):\n for key in a:\n if a[key] != y[i][key]:\n # Not equal, try numerical comparison with tolerances\n\n if not numpy.allclose(a[key], y[i][key],\n rtol=rtol, atol=atol):\n return False\n\n # Check keywords\n if self.keywords != other.keywords:\n return False\n\n # Vector layers are identical up to the specified tolerance\n return True\n\n def __ne__(self, 
other):\n \"\"\"Override '!=' to allow comparison with other projection objecs\n \"\"\"\n return not self == other\n\n def get_name(self):\n return self.name\n\n def get_keywords(self, key=None):\n \"\"\"Return keywords dictionary\n \"\"\"\n if key is None:\n return self.keywords\n else:\n if key in self.keywords:\n return self.keywords[key]\n else:\n msg = ('Keyword %s does not exist in %s: Options are '\n '%s' % (key, self.get_name(), self.keywords.keys()))\n raise Exception(msg)\n\n def get_caption(self):\n \"\"\"Return 'caption' keyword if present. Otherwise ''.\n \"\"\"\n if 'caption' in self.keywords:\n return self.keywords['caption']\n else:\n return ''\n\n def read_from_file(self, filename):\n \"\"\" Read and unpack vector data.\n\n It is assumed that the file contains only one layer with the\n pertinent features. Further it is assumed for the moment that\n all geometries are points.\n\n * A feature is a geometry and a set of attributes.\n * A geometry refers to location and can be point, line, polygon or\n combinations thereof.\n * The attributes or obtained through GetField()\n\n The full OGR architecture is documented at\n * http://www.gdal.org/ogr/ogr_arch.html\n * http://www.gdal.org/ogr/ogr_apitut.html\n\n Examples are at\n * danieljlewis.org/files/2010/09/basicpythonmap.pdf\n * http://invisibleroads.com/tutorials/gdal-shapefile-points-save.html\n * http://www.packtpub.com/article/geospatial-data-python-geometry\n \"\"\"\n\n basename, _ = os.path.splitext(filename)\n\n # Look for any keywords\n self.keywords = read_keywords(basename + '.keywords')\n\n # Determine name\n if 'title' in self.keywords:\n vectorname = self.keywords['title']\n else:\n # Use basename without leading directories as name\n vectorname = os.path.split(basename)[-1]\n\n self.name = vectorname\n self.filename = filename\n self.geometry_type = None # In case there are no features\n\n fid = ogr.Open(filename)\n if fid is None:\n msg = 'Could not open %s' % filename\n raise IOError(msg)\n\n # Assume that file contains all data in one layer\n msg = 'Only one vector layer currently allowed'\n if fid.GetLayerCount() > 1:\n msg = ('WARNING: Number of layers in %s are %i. '\n 'Only the first layer will currently be '\n 'used.' % (filename, fid.GetLayerCount()))\n raise Exception(msg)\n\n layer = fid.GetLayerByIndex(0)\n\n # Get spatial extent\n self.extent = layer.GetExtent()\n\n # Get projection\n p = layer.GetSpatialRef()\n self.projection = Projection(p)\n\n # Get number of features\n N = layer.GetFeatureCount()\n\n # Extract coordinates and attributes for all features\n geometry = []\n data = []\n for i in range(N):\n feature = layer.GetFeature(i)\n if feature is None:\n msg = 'Could not get feature %i from %s' % (i, filename)\n raise Exception(msg)\n\n # Record coordinates ordered as Longitude, Latitude\n G = feature.GetGeometryRef()\n if G is None:\n msg = ('Geometry was None in filename %s ' % filename)\n raise Exception(msg)\n else:\n self.geometry_type = G.GetGeometryType()\n if self.geometry_type == ogr.wkbPoint:\n geometry.append((G.GetX(), G.GetY()))\n elif self.geometry_type == ogr.wkbPolygon:\n ring = G.GetGeometryRef(0)\n M = ring.GetPointCount()\n coordinates = []\n for j in range(M):\n coordinates.append((ring.GetX(j), ring.GetY(j)))\n\n # Record entire polygon ring as an Mx2 numpy array\n geometry.append(numpy.array(coordinates,\n dtype='d',\n copy=False))\n else:\n msg = ('Only point and polygon geometries are supported. '\n 'Geometry in filename %s '\n 'was %s.' 
% (filename,\n G.GetGeometryType()))\n raise Exception(msg)\n\n # Record attributes by name\n number_of_fields = feature.GetFieldCount()\n fields = {}\n for j in range(number_of_fields):\n name = feature.GetFieldDefnRef(j).GetName()\n\n # FIXME (Ole): Ascertain the type of each field?\n # We need to cast each appropriately?\n # This is issue #66\n #feature_type = feature.GetFieldDefnRef(j).GetType()\n fields[name] = feature.GetField(j)\n #print 'Field', name, feature_type, j, fields[name]\n\n data.append(fields)\n\n # Store geometry coordinates as a compact numeric array\n self.geometry = geometry\n self.data = data\n\n def write_to_file(self, filename):\n \"\"\"Save vector data to file\n\n Input\n filename: filename with extension .shp or .gml\n\n Note, if attribute names are longer than 10 characters they will be\n truncated. This is due to limitations in the shp file driver and has\n to be done here since gdal v1.7 onwards has changed its handling of\n this issue: http://www.gdal.org/ogr/drv_shapefile.html\n \"\"\"\n\n # Check file format\n basename, extension = os.path.splitext(filename)\n\n msg = ('Invalid file type for file %s. Only extensions '\n 'shp or gml allowed.' % filename)\n assert extension == '.shp' or extension == '.gml', msg\n driver = DRIVER_MAP[extension]\n\n # FIXME (Ole): Tempory flagging of GML issue (ticket #18)\n if extension == '.gml':\n msg = ('OGR GML driver does not store geospatial reference.'\n 'This format is disabled for the time being. See '\n 'https://github.com/AIFDR/riab/issues/18')\n raise Exception(msg)\n\n # Derive layername from filename (excluding preceding dirs)\n layername = os.path.split(basename)[-1]\n\n # Get vector data\n geometry = self.get_geometry()\n data = self.get_data()\n\n N = len(geometry)\n\n # Clear any previous file of this name (ogr does not overwrite)\n try:\n os.remove(filename)\n except:\n pass\n\n # Create new file with one layer\n drv = ogr.GetDriverByName(driver)\n if drv is None:\n msg = 'OGR driver %s not available' % driver\n raise Exception(msg)\n\n ds = drv.CreateDataSource(filename)\n if ds is None:\n msg = 'Creation of output file %s failed' % filename\n raise Exception(msg)\n\n lyr = ds.CreateLayer(layername,\n self.projection.spatial_reference,\n self.geometry_type)\n if lyr is None:\n msg = 'Could not create layer %s' % layername\n raise Exception(msg)\n\n # Define attributes if any\n store_attributes = False\n if data is not None:\n if len(data) > 0:\n try:\n fields = data[0].keys()\n except:\n msg = ('Input parameter \"attributes\" was specified '\n 'but it does not contain dictionaries with '\n 'field information as expected. 
The first'\n 'element is %s' % data[0])\n raise Exception(msg)\n else:\n # Establish OGR types for each element\n ogrtypes = {}\n for name in fields:\n att = data[0][name]\n py_type = type(att)\n msg = ('Unknown type for storing vector '\n 'data: %s, %s' % (name, str(py_type)[1:-1]))\n assert py_type in TYPE_MAP, msg\n ogrtypes[name] = TYPE_MAP[py_type]\n\n else:\n msg = ('Input parameter \"data\" was specified '\n 'but appears to be empty')\n raise Exception(msg)\n\n # Create attribute fields in layer\n store_attributes = True\n for name in fields:\n fd = ogr.FieldDefn(name, ogrtypes[name])\n # FIXME (Ole): Trying to address issue #16\n # But it doesn't work and\n # somehow changes the values of MMI in test\n #width = max(128, len(name))\n #print name, width\n #fd.SetWidth(width)\n\n # Silent handling of warnings like\n # Warning 6: Normalized/laundered field name:\n #'CONTENTS_LOSS_AUD' to 'CONTENTS_L'\n gdal.PushErrorHandler('CPLQuietErrorHandler')\n if lyr.CreateField(fd) != 0:\n msg = 'Could not create field %s' % name\n raise Exception(msg)\n\n # Restore error handler\n gdal.PopErrorHandler()\n\n # Store geometry\n geom = ogr.Geometry(self.geometry_type)\n layer_def = lyr.GetLayerDefn()\n for i in range(N):\n # Create new feature instance\n feature = ogr.Feature(layer_def)\n\n # Store geometry and check\n if self.geometry_type == ogr.wkbPoint:\n x = float(geometry[i][0])\n y = float(geometry[i][1])\n geom.SetPoint_2D(0, x, y)\n elif self.geometry_type == ogr.wkbPolygon:\n wkt = array2wkt(geometry[i], geom_type='POLYGON')\n geom = ogr.CreateGeometryFromWkt(wkt)\n else:\n msg = 'Geometry type %s not implemented' % self.geometry_type\n raise Exception(msg)\n\n feature.SetGeometry(geom)\n\n G = feature.GetGeometryRef()\n if G is None:\n msg = 'Could not create GeometryRef for file %s' % filename\n raise Exception(msg)\n\n # Store attributes\n if store_attributes:\n for j, name in enumerate(fields):\n actual_field_name = layer_def.GetFieldDefn(j).GetNameRef()\n\n val = data[i][name]\n if type(val) == numpy.ndarray:\n # A singleton of type <type 'numpy.ndarray'> works\n # for gdal version 1.6 but fails for version 1.8\n # in SetField with error: NotImplementedError:\n # Wrong number of arguments for overloaded function\n val = float(val)\n\n feature.SetField(actual_field_name, val)\n\n # Save this feature\n if lyr.CreateFeature(feature) != 0:\n msg = 'Failed to create feature %i in file %s' % (i, filename)\n raise Exception(msg)\n\n feature.Destroy()\n\n # Write keywords if any\n write_keywords(self.keywords, basename + '.keywords')\n\n def get_attribute_names(self):\n \"\"\" Get available attribute names\n\n These are the ones that can be used with get_data\n \"\"\"\n\n return self.data[0].keys()\n\n def get_data(self, attribute=None, index=None):\n \"\"\"Get vector attributes\n\n Data is returned as a list where each entry is a dictionary of\n attributes for one feature. Entries in get_geometry() and\n get_data() are related as 1-to-1\n\n If optional argument attribute is specified and a valid name,\n then the list of values for that attribute is returned.\n\n If optional argument index is specified on the that value will\n be returned. Any value of index is ignored if attribute is None.\n \"\"\"\n\n if hasattr(self, 'data'):\n if attribute is None:\n return self.data\n else:\n msg = ('Specified attribute %s does not exist in '\n 'vector layer %s. 
Valid names are %s'\n '' % (attribute, self, self.data[0].keys()))\n assert attribute in self.data[0], msg\n\n if index is None:\n # Return all values for specified attribute\n return [x[attribute] for x in self.data]\n else:\n # Return value for specified attribute and index\n msg = ('Specified index must be either None or '\n 'an integer. I got %s' % index)\n assert type(index) == type(0)\n\n msg = ('Specified index must lie within the bounds '\n 'of vector layer %s which is [%i, %i]'\n '' % (self, 0, len(self) - 1))\n assert 0 <= index < len(self)\n\n return self.data[index][attribute]\n else:\n msg = 'Vector data instance does not have any attributes'\n raise Exception(msg)\n\n def get_geometry(self):\n \"\"\"Return geometry for vector layer.\n\n Depending on the feature type, geometry is\n\n geometry type output type\n -----------------------------\n point coordinates (Nx2 array of longitudes and latitudes)\n line TODO\n polygon list of arrays of coordinates\n\n \"\"\"\n return self.geometry\n\n def get_projection(self, proj4=False):\n \"\"\"Return projection of this layer as a string\n \"\"\"\n return self.projection.get_projection(proj4)\n\n def get_bounding_box(self):\n \"\"\"Get bounding box coordinates for vector layer.\n\n Format is [West, South, East, North]\n \"\"\"\n e = self.extent\n return [e[0], # West\n e[2], # South\n e[1], # East\n e[3]] # North\n\n def get_extrema(self, attribute=None):\n \"\"\"Get min and max values from specified attribute\n\n Return min, max\n \"\"\"\n if attribute is None:\n msg = ('Valid attribute name must be specified in get_extrema '\n 'for vector layers. I got None.')\n raise RuntimeError(msg)\n\n x = self.get_data(attribute)\n return min(x), max(x)\n\n def get_topN(self, attribute, N=10):\n \"\"\"Get top N features\n\n Input\n attribute: The name of attribute where values are sought\n N: How many\n\n Output\n layer: New vector layer with selected features\n \"\"\"\n\n # FIXME (Ole): Maybe generalise this to arbitrary expressions\n\n # Input checks\n msg = ('Specfied attribute must be a string. '\n 'I got %s' % (type(attribute)))\n assert isinstance(attribute, basestring), msg\n\n msg = 'Specified attribute was empty'\n assert attribute != '', msg\n\n msg = 'N must be a positive number. 
I got %i' % N\n assert N > 0, msg\n\n # Create list of values for specified attribute\n values = self.get_data(attribute)\n\n # Sort and select using Schwarzian transform\n A = zip(values, self.data, self.geometry)\n A.sort()\n\n # Pick top N and unpack\n _, data, geometry = zip(*A[-N:])\n\n # Create new Vector instance and return\n return Vector(data=data,\n projection=self.get_projection(),\n geometry=geometry)\n\n def interpolate(self, X, name=None):\n \"\"\"Interpolate values of this vector layer to other layer\n\n Input\n X: Layer object defining target\n name: Optional name of interpolated layer\n\n Output\n Y: Layer object with values of this vector layer interpolated to\n geometry of input layer X\n \"\"\"\n\n msg = 'Interpolation from vector layers not yet implemented'\n raise Exception(msg)\n\n @property\n def is_raster(self):\n return False\n\n @property\n def is_vector(self):\n return True\n\n @property\n def is_point_data(self):\n return self.is_vector and self.geometry_type == ogr.wkbPoint\n\n @property\n def is_polygon_data(self):\n return self.is_vector and self.geometry_type == ogr.wkbPolygon\n\n\n#----------------------------------\n# Helper functions for class Vector\n#----------------------------------\n\ndef convert_polygons_to_centroids(V):\n \"\"\"Convert polygon vector data to point vector data\n\n Input\n V: Vector layer with polygon data\n\n Output\n Vector layer with point data and the same attributes as V\n \"\"\"\n\n msg = 'Input data %s must be polygon vector data' % V\n assert V.is_polygon_data, msg\n\n geometry = V.get_geometry()\n N = len(V)\n\n # Calculate centroids for each polygon\n centroids = []\n for i in range(N):\n c = calculate_polygon_centroid(geometry[i])\n centroids.append(c)\n\n # Create new point vector layer with same attributes and return\n V = Vector(data=V.get_data(),\n projection=V.get_projection(),\n geometry=centroids,\n name='%s_centroid_data' % V.get_name(),\n keywords=V.get_keywords())\n return V\n" }, { "alpha_fraction": 0.7530864477157593, "alphanum_fraction": 0.7530864477157593, "avg_line_length": 22.14285659790039, "blob_id": "82d8ed53a9457865d35f413cc3543c4c2d6ef9eb", "content_id": "8c52d9af05b0d44987625c34de8cc6fd16e4274b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 162, "license_type": "no_license", "max_line_length": 64, "num_lines": 7, "path": "/impact/engine/utilities.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "\"\"\"Miscellaneous utility functions for Risk-in-a-Box (riab_core)\n\"\"\"\n\nimport sys\n\n# A maximum floating point number for this package\nMAXFLOAT = float(sys.maxint)\n" }, { "alpha_fraction": 0.5412763357162476, "alphanum_fraction": 0.5450819730758667, "avg_line_length": 36.53845977783203, "blob_id": "4925b97a8695646325b4b622bfd44cd85d7b2874", "content_id": "266f94693e345fbeab6f41cd1d56ac363b31c171", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3416, "license_type": "no_license", "max_line_length": 143, "num_lines": 91, "path": "/calculator/app/config.js", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "// map url patterns to exported JSGI app functions\nvar urls = [\n [(/^\\/proxy/), require(\"./proxy\").app]\n];\n\n// debug mode loads unminified scripts\n// assumes markup pulls in scripts under the path /servlet_name/script/\nif (java.lang.System.getProperty(\"app.debug\")) {\n var fs = require(\"fs\");\n var config = 
fs.normal(fs.join(module.directory, \"..\", \"buildjs.cfg\"));\n urls.push(\n [(/^\\/script(\\/.*)/), require(\"./autoloader\").App(config)]\n );\n\n // proxy a remote geoserver on /geoserver and the original path by setting\n // proxy.geoserver to remote URL - only recommended for debug mode\n var geoserver = java.lang.System.getProperty(\"app.proxy.geoserver\");\n if (geoserver) {\n if (geoserver.charAt(geoserver.length-1) !== \"/\") {\n geoserver = geoserver + \"/\";\n }\n var path = geoserver.split(\"/\");\n var geoserverEndpoint = path[path.length-2];\n if (geoserverEndpoint != \"geoserver\") {\n urls.push(\n [new RegExp(\"^\\\\/\" + geoserverEndpoint + \"\\\\/(.*)\"), require(\"./proxy\").pass({url: geoserver, preserveHost: true})]\n );\n }\n urls.push(\n [(/^\\/geoserver\\/(.*)/), require(\"./proxy\").pass({url: geoserver, preserveHost: true})]\n );\n }\n\n // proxy a remote geonode on / - only recommended for debug mode\n var geonode = java.lang.System.getProperty(\"app.proxy.geonode\");\n if (geonode) {\n if (geonode.charAt(geonode.length-1) !== \"/\") {\n geonode = geonode + \"/\";\n }\n var endpoints = [\"maps\", \"data\", \"accounts\", \"impact\", \"proxy\", \"lang.js\", \"jsi18n\"];\n var isJS, endpoint;\n for (var i=endpoints.length-1; i>=0; --i) {\n endpoint = endpoints[i];\n isJS = endpoint.indexOf(\".js\") == endpoint.length-3;\n urls.push(\n [new RegExp(\"^\\\\/\" + endpoints[i] + \"(\\\\/(.*))?\"), require(\"./proxy\").pass({url: geonode + endpoints[i] + (isJS ? \"#\" : \"/\")})]\n );\n }\n }\n}\n\nexports.urls = urls;\n\n// redirect requests without a trailing slash\n// Jetty does this automatically for /servlet_name, Tomcat does not\nfunction slash(config) {\n return function(app) {\n return function(request) {\n var response;\n var servletRequest = request.env.servletRequest;\n var pathInfo = servletRequest.getPathInfo();\n if (pathInfo === \"/\") {\n var uri = servletRequest.getRequestURI();\n if (uri.charAt(uri.length-1) !== \"/\") {\n var location = servletRequest.getScheme() + \"://\" + \n servletRequest.getServerName() + \":\" + servletRequest.getServerPort() + \n uri + \"/\";\n return {\n status: 301,\n headers: {\"Location\": location},\n body: []\n };\n }\n }\n return app(request);\n };\n };\n}\n\nexports.middleware = [\n slash(),\n require(\"ringo/middleware/gzip\").middleware,\n require(\"ringo/middleware/static\").middleware({base: module.resolve(\"static\"), index: \"index.html\"}),\n require(\"ringo/middleware/error\").middleware,\n require(\"ringo/middleware/notfound\").middleware\n];\n\nexports.app = require(\"ringo/webapp\").handleRequest;\n\nexports.charset = \"UTF-8\";\nexports.contentType = \"text/html\";\n" }, { "alpha_fraction": 0.5360521078109741, "alphanum_fraction": 0.5570737719535828, "avg_line_length": 31.360544204711914, "blob_id": "a6dda1501e1d6fcf35fcb4fbbba3b63c1fbff166", "content_id": "b83810c39976e7665310d742455f65c1f6303c1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4757, "license_type": "no_license", "max_line_length": 68, "num_lines": 147, "path": "/docs/usage/plugins/flood_plugins.rst", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "Example Flood Plugins\n=====================\n\nExample Flood Plugin\n--------------------\n\nThis example calculates the flood impact on building data\n\nPlugin code:: \n\n\tfrom django.template.loader import render_to_string\n\tfrom impact.plugins.core import FunctionProvider\n\tfrom impact.storage.vector import 
Vector\n\tfrom django.utils.translation import ugettext as _\n\tfrom impact.plugins.utilities import PointZoomSize\n\tfrom impact.plugins.utilities import PointClassColor\n\tfrom impact.plugins.utilities import PointSymbol\n\timport scipy.stats\n\n\n\tclass FloodBuildingImpactFunction(FunctionProvider):\n\t \"\"\"Risk plugin for flood impact on building data\n\n\t :param requires category=='hazard' and \\\n\t\t subcategory.startswith('flood') and \\\n\t\t layer_type=='raster' and \\\n\t\t unit=='m'\n\t :param requires category=='exposure' and \\\n\t\t subcategory.startswith('building')\n\t \"\"\"\n\n\t target_field = 'AFFECTED'\n\n\t def run(self, layers):\n\t\t\"\"\"Risk plugin for tsunami population\n\t\t\"\"\"\n\n\t\t# Extract data\n\t\t# FIXME (Ole): This will be replaced by a helper function\n\t\t# to separate hazard from exposure using keywords\n\t\tH = layers[0] # Depth\n\t\tE = layers[1] # Building locations\n\n\t\t# Interpolate hazard level to building locations\n\t\tH = H.interpolate(E)\n\n\t\t# Extract relevant numerical data\n\t\tcoordinates = E.get_geometry()\n\t\tdepth = H.get_data()\n\t\tN = len(depth)\n\n\t\t# List attributes to carry forward to result layer\n\t\tattributes = E.get_attribute_names()\n\n\t\t#print attributes\n\t\t#print 'Number of population points', N\n\n\t\t# Calculate population impact\n\t\tcount = 0\n\t\tbuilding_impact = []\n\t\tfor i in range(N):\n\t\t dep = float(depth[i].values()[0])\n\n\t\t # Tag and count\n\t\t if dep > 0.1:\n\t\t affected = 99.5\n\t\t count += 1\n\t\t else:\n\t\t affected = 0\n\n\t\t # Collect depth and calculated damage\n\t\t result_dict = {'AFFECTED': affected,\n\t\t 'DEPTH': dep}\n\n\t\t # Carry all original attributes forward\n\t\t for key in attributes:\n\t\t result_dict[key] = E.get_data(key, i)\n\n\t\t # Record result for this feature\n\t\t building_impact.append(result_dict)\n\n\t\t# Create report\n\t\tcaption = ('<table border=\"0\" width=\"320px\">'\n\t\t ' <tr><th><b>%s</b></th><th><b>%s</b></th></th>'\n\t\t ' <tr></tr>'\n\t\t ' <tr><td>%s&#58;</td><td>%i</td></tr>'\n\t\t ' <tr><td>%s (> 10 cm) &#58;</td><td>%i</td></tr>'\n\t\t ' <tr><td>%s (< 10 cm) &#58;</td><td>%i</td></tr>'\n\t\t '</table>' % (_('Buildings'), _('Total'),\n\t\t _('All'), N,\n\t\t _('Inundated'), count,\n\t\t _('Not inundated'), N - count))\n\n\t\t# Create vector layer and return\n\t\tV = Vector(data=building_impact,\n\t\t projection=E.get_projection(),\n\t\t geometry=coordinates,\n\t\t name='Estimated buildings affected',\n\t\t keywords={'caption': caption})\n\t\treturn V\n\n\t def generate_style(self, data):\n\t\t\"\"\"Generates and SLD file based on the data values\n\t\t\"\"\"\n\n\t\tDEFAULT_SYMBOL = 'circle'\n\n\t\tsymbol_field = None\n\t\tsymbol_keys = [None, '']\n\t\tsymbol_values = [DEFAULT_SYMBOL, DEFAULT_SYMBOL]\n\n\t\tscale_keys = [10000000000, 10000000, 5000000, 1000000,\n\t\t 500000, 250000, 100000]\n\t\tscale_values = [5, 5, 5, 5, 5, 8, 14]\n\n\t\tclass_keys = ['Not affected', 'Greater than 10 cm']\n\t\tclass_values = [{'min': 0, 'max': 90,\n\t\t 'color': '#cccccc', 'opacity': '0.2'},\n\t\t {'min': 90, 'max': 100,\n\t\t 'color': '#F31a0c', 'opacity': '1'}]\n\n\t\tif self.symbol_field in data.get_attribute_names():\n\t\t symbol_field = self.symbol_field\n\n\t\t symbol_keys.extend(['Church/Mosque', 'Commercial (office)',\n\t\t 'Hotel',\n\t\t 'Medical facility', 'Other',\n\t\t 'Other industrial',\n\t\t 'Residential', 'Retail', 'School',\n\t\t 'Unknown', 'Warehouse'])\n\n\t\t symbol_values.extend([DEFAULT_SYMBOL, DEFAULT_SYMBOL,\n\t\t 
DEFAULT_SYMBOL,\n\t\t DEFAULT_SYMBOL, DEFAULT_SYMBOL,\n\t\t DEFAULT_SYMBOL,\n\t\t DEFAULT_SYMBOL, DEFAULT_SYMBOL,\n\t\t DEFAULT_SYMBOL,\n\t\t DEFAULT_SYMBOL, DEFAULT_SYMBOL])\n\n\t\tparams = dict(name=data.get_name(),\n\t\t damage_field=self.target_field,\n\t\t symbol_field=symbol_field,\n\t\t symbols=dict(zip(symbol_keys, symbol_values)),\n\t\t scales=dict(zip(scale_keys, scale_values)),\n\t\t classifications=dict(zip(class_keys, class_values)))\n\n\t\treturn render_to_string('impact/styles/point_classes.sld', params)\n" }, { "alpha_fraction": 0.5580117106437683, "alphanum_fraction": 0.5682706236839294, "avg_line_length": 35.88288116455078, "blob_id": "abdb9282f6c63b6b3d55512635af252422bd6c08", "content_id": "3f1bca1f724f9c0cc7025b58ff71f91413ec0279", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8188, "license_type": "no_license", "max_line_length": 78, "num_lines": 222, "path": "/impact/plugins/mappings.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "\"\"\"Collection of mappings for standard vulnerability classes\n\"\"\"\nimport numpy\nfrom impact.storage.vector import Vector\n\n\ndef osm2padang(E):\n \"\"\"Map OSM attributes to Padang vulnerability classes\n\n This maps attributes collected in the OpenStreetMap exposure data\n (data.kompetisiosm.org) to 9 vulnerability classes identified by\n Geoscience Australia and ITB in the post 2009 Padang earthquake\n survey (http://trove.nla.gov.au/work/38470066).\n The mapping was developed by Abigail Baca, GFDRR.\n\n Input\n E: Vector object representing the OSM data\n\n Output:\n Vector object like E, but with one new attribute ('VCLASS')\n representing the vulnerability class used in the Padang dataset\n\n\n Algorithm\n\n 1. Class the \"levels\" field into height bands where 1-3 = low,\n 4-10 = mid, >10 = high\n 2. Where height band = mid then building type = 4\n \"RC medium rise Frame with Masonry in-fill walls\"\n 3. Where height band = high then building type = 6\n \"Concrete Shear wall high rise* Hazus C2H\"\n 4. Where height band = low and structure = (plastered or\n reinforced_masonry) then building type = 7\n \"RC low rise Frame with Masonry in-fill walls\"\n 5. Where height band = low and structure = confined_masonry then\n building type = 8 \"Confined Masonry\"\n 6. Where height band = low and structure = unreinforced_masonry then\n building type = 2 \"URM with Metal Roof\"\n \"\"\"\n\n # Input check\n required = ['levels', 'structure']\n actual = E.get_attribute_names()\n msg = ('Input data to osm2padang must have attributes %s. 
'\n 'It has %s' % (str(required), str(actual)))\n for attribute in required:\n assert attribute in actual, msg\n\n # Start mapping\n N = len(E)\n attributes = E.get_data()\n count = 0\n for i in range(N):\n levels = E.get_data('levels', i)\n structure = E.get_data('structure', i)\n if levels is None or structure is None:\n vulnerability_class = 2\n count += 1\n else:\n if levels >= 10:\n # High\n vulnerability_class = 6 # Concrete shear\n elif 4 <= levels < 10:\n # Mid\n vulnerability_class = 4 # RC mid\n elif 1 <= levels < 4:\n # Low\n if structure in ['plastered',\n 'reinforced masonry',\n 'reinforced_masonry']:\n vulnerability_class = 7 # RC low\n elif structure == 'confined_masonry':\n vulnerability_class = 8 # Confined\n elif 'kayu' in structure or 'wood' in structure:\n vulnerability_class = 9 # Wood\n else:\n vulnerability_class = 2 # URM\n elif numpy.allclose(levels, 0):\n # A few buildings exist with 0 levels.\n\n # In general, we should be assigning here the most\n # frequent building in the area which could be defined\n # by admin boundaries.\n vulnerability_class = 2\n else:\n msg = 'Unknown number of levels: %s' % levels\n raise Exception(msg)\n\n # Store new attribute value\n attributes[i]['VCLASS'] = vulnerability_class\n\n # Selfcheck for use with osm_080811.shp\n if E.get_name() == 'osm_080811':\n if levels > 0:\n msg = ('Got %s expected %s. levels = %f, structure = %s'\n % (vulnerability_class,\n attributes[i]['TestBLDGCl'],\n levels,\n structure))\n assert numpy.allclose(attributes[i]['TestBLDGCl'],\n vulnerability_class), msg\n\n #print 'Got %i without levels or structure (out of %i total)' % (count, N)\n\n # Create new vector instance and return\n V = Vector(data=attributes,\n projection=E.get_projection(),\n geometry=E.get_geometry(),\n name=E.get_name() + ' mapped to Padang vulnerability classes',\n keywords=E.get_keywords())\n return V\n\n\ndef osm2bnpb(E, target_attribute='VCLASS'):\n \"\"\"Map OSM attributes to BNPB vulnerability classes\n\n This maps attributes collected in the OpenStreetMap exposure data\n (data.kompetisiosm.org) to 2 vulnerability classes identified by\n BNPB in Kajian Risiko Gempabumi VERS 1.0, 2011. They are\n URM: Unreinforced Masonry and RM: Reinforced Masonry\n\n Input\n E: Vector object representing the OSM data\n target_attribute: Optional name of the attribute containing\n the mapped vulnerability class. Default\n value is 'VCLASS'\n\n Output:\n Vector object like E, but with one new attribute (e.g. 'VCLASS')\n representing the vulnerability class used in the guidelines\n \"\"\"\n\n # Input check\n required = ['levels', 'structure']\n actual = E.get_attribute_names()\n msg = ('Input data to osm2bnpb must have attributes %s. 
'\n 'It has %s' % (str(required), str(actual)))\n for attribute in required:\n assert attribute in actual, msg\n\n # Start mapping\n N = len(E)\n attributes = E.get_data()\n count = 0\n for i in range(N):\n levels = E.get_data('levels', i)\n structure = E.get_data('structure', i)\n if levels is None or structure is None:\n vulnerability_class = 'URM'\n count += 1\n else:\n if levels >= 4:\n # High\n vulnerability_class = 'RM'\n elif 1 <= levels < 4:\n # Low\n if structure in ['reinforced masonry',\n 'reinforced_masonry']:\n vulnerability_class = 'RM'\n elif structure == 'confined_masonry':\n vulnerability_class = 'RM'\n elif 'kayu' in structure or 'wood' in structure:\n vulnerability_class = 'RM'\n else:\n vulnerability_class = 'URM'\n elif numpy.allclose(levels, 0):\n # A few buildings exist with 0 levels.\n\n # In general, we should be assigning here the most\n # frequent building in the area which could be defined\n # by admin boundaries.\n vulnerability_class = 'URM'\n else:\n msg = 'Unknown number of levels: %s' % levels\n raise Exception(msg)\n\n # Store new attribute value\n attributes[i][target_attribute] = vulnerability_class\n\n #print 'Got %i without levels or structure (out of %i total)' % (count, N)\n\n # Create new vector instance and return\n V = Vector(data=attributes,\n projection=E.get_projection(),\n geometry=E.get_geometry(),\n name=E.get_name() + ' mapped to BNPB vulnerability classes',\n keywords=E.get_keywords())\n return V\n\n\ndef unspecific2bnpb(E, target_attribute='VCLASS'):\n \"\"\"Map Unspecific point data to BNPB vulnerability classes\n\n This makes no assumptions about attributes and maps everything to\n URM: Unreinforced Masonry\n\n Input\n E: Vector object representing the OSM data\n target_attribute: Optional name of the attribute containing\n the mapped vulnerability class. Default\n value is 'VCLASS'\n\n Output:\n Vector object like E, but with one new attribute (e.g. 'VCLASS')\n representing the vulnerability class used in the guidelines\n \"\"\"\n\n # Start mapping\n N = len(E)\n attributes = E.get_data()\n count = 0\n for i in range(N):\n # Store new attribute value\n attributes[i][target_attribute] = 'URM'\n\n # Create new vector instance and return\n V = Vector(data=attributes,\n projection=E.get_projection(),\n geometry=E.get_geometry(),\n name=E.get_name() + ' mapped to BNPB vulnerability class URM',\n keywords=E.get_keywords())\n return V\n" }, { "alpha_fraction": 0.6227470636367798, "alphanum_fraction": 0.6277191042900085, "avg_line_length": 20.33333396911621, "blob_id": "841297cdb8eab3f22afc6f6d60f40db5211e1760", "content_id": "289c35e183a90c7a2ef461b0703d8ee89171f504", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1609, "license_type": "no_license", "max_line_length": 350, "num_lines": 75, "path": "/docs/index.rst", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": ".. _index:\n\n.. image:: http://riskinabox.org/media/riab-logo.png\n\t:scale: 50 %\n\t:align: center\n\n======================\nRisiko's documentation\n======================\n\n.. rubric:: Risiko is a web based tool that models impacts of different hazard events on population or infrastructure. It is part of a set of Open Source Software tools called Risk in a Box, and we encourage you to build new applications using its components and the resources it provides. The project home page can be found at http://riskinabox.org/\n\n\n.. 
figure:: images/screenshot.png\n\t:scale: 76 %\n \n Risiko Screenshot showing earthquake hazard in Indonesia\n\n\n=============================\nIntroduction and Installation\n=============================\n\n.. toctree::\n :maxdepth: 3\n :numbered:\n\n intro/basic_install\n intro/faq\n intro/glossary\n \n===========\nUsage Guide\n===========\n\n.. toctree::\n :maxdepth: 3\n :numbered:\n\n usage/overview\n usage/risiko_calculator\n usage/plugins/development\n usage/plugins/examples_intro\n usage/plugins/earthquake_plugins\n usage/plugins/tsunami_plugins\n usage/plugins/flood_plugins\n \n \n===============\nDeveloper Guide\n===============\n\n.. toctree::\n :maxdepth: 3\n :numbered:\n\n development/overview\n development/architecture\n development/pluginmanager\n development/documentation\n development/contributing\n development/release-process\n development/git\n development/dev_help\n\n\n===========================\nProduction Deployment Guide\n===========================\n\n.. toctree::\n :maxdepth: 3\n :numbered:\n\n deployment/production_install\n \n\n\n\n\n\n" }, { "alpha_fraction": 0.5430644750595093, "alphanum_fraction": 0.5481123328208923, "avg_line_length": 32.96071243286133, "blob_id": "ca6e8e37d827cfd21728991f8daf3eff6429cb73", "content_id": "a303ab53044db02e9e9ea9c49a685a90f9398588", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19018, "license_type": "no_license", "max_line_length": 79, "num_lines": 560, "path": "/impact/storage/raster.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "\"\"\"Class Raster\n\"\"\"\n\nimport os\nimport numpy\nfrom osgeo import gdal\nfrom impact.storage.projection import Projection\nfrom impact.storage.utilities import DRIVER_MAP\nfrom impact.engine.interpolation import interpolate_raster_vector\nfrom impact.storage.utilities import read_keywords\nfrom impact.storage.utilities import write_keywords\nfrom impact.storage.utilities import nanallclose\nfrom impact.storage.utilities import geotransform2bbox, geotransform2resolution\n\n\nclass Raster:\n \"\"\"Internal representation of raster data\n \"\"\"\n\n def __init__(self, data=None, projection=None, geotransform=None,\n name='Raster layer', keywords=None):\n \"\"\"Initialise object with either data or filename\n\n Input\n data: Can be either\n * a filename of a raster file format known to GDAL\n * an MxN array of raster data\n * None (FIXME (Ole): Remove this option)\n projection: Geospatial reference in WKT format.\n Only used if data is provide as a numeric array,\n geotransform: GDAL geotransform (6-tuple).\n (top left x, w-e pixel resolution, rotation,\n top left y, rotation, n-s pixel resolution).\n See e.g. http://www.gdal.org/gdal_tutorial.html\n Only used if data is provide as a numeric array,\n name: Optional name for layer.\n Only used if data is provide as a numeric array,\n keywords: Optional dictionary with keywords that describe the\n layer. 
When the layer is stored, these keywords will\n be written into an associated file with extension\n .keywords.\n\n Keywords can for example be used to display text\n about the layer in a web application.\n\n Note that if data is a filename, all other arguments are ignored\n as they will be inferred from the file.\n \"\"\"\n\n # Input checks\n if data is None:\n # Instantiate empty object\n self.name = name\n self.data = None\n self.projection = None\n self.coordinates = None\n self.filename = None\n self.keywords = {}\n return\n\n # Initialisation\n if isinstance(data, basestring):\n self.read_from_file(data)\n else:\n # Assume that data is provided as an array\n # with extra keyword arguments supplying metadata\n if keywords is None:\n self.keywords = {}\n else:\n msg = ('Specified keywords must be either None or a '\n 'dictionary. I got %s' % keywords)\n assert isinstance(keywords, dict), msg\n self.keywords = keywords\n\n self.data = numpy.array(data, dtype='d', copy=False)\n\n self.filename = None\n self.name = name\n\n self.projection = Projection(projection)\n self.geotransform = geotransform\n\n self.rows = data.shape[0]\n self.columns = data.shape[1]\n\n self.number_of_bands = 1\n\n def __str__(self):\n return self.name\n\n def __len__(self):\n \"\"\"Size of data set defined as total number of grid points\n \"\"\"\n return len(self.get_data().flat)\n\n def __eq__(self, other, rtol=1.0e-5, atol=1.0e-8):\n \"\"\"Override '==' to allow comparison with other raster objecs\n\n Input\n other: Raster instance to compare to\n rtol, atol: Relative and absolute tolerance.\n See numpy.allclose for details\n \"\"\"\n\n # Check type\n if not isinstance(other, Raster):\n msg = ('Raster instance cannot be compared to %s'\n ' as its type is %s ' % (str(other), type(other)))\n raise TypeError(msg)\n\n # Check projection\n if self.projection != other.projection:\n return False\n\n # Check geotransform\n if self.get_geotransform() != other.get_geotransform():\n return False\n\n # Check data\n if not nanallclose(self.get_data(),\n other.get_data(),\n rtol=rtol, atol=atol):\n return False\n\n # Check keywords\n if self.keywords != other.keywords:\n return False\n\n # Raster layers are identical up to the specified tolerance\n return True\n\n def __ne__(self, other):\n \"\"\"Override '!=' to allow comparison with other projection objecs\n \"\"\"\n return not self == other\n\n def get_name(self):\n return self.name\n\n def get_keywords(self, key=None):\n \"\"\"Return keywords dictionary\n \"\"\"\n if key is None:\n return self.keywords\n else:\n if key in self.keywords:\n return self.keywords[key]\n else:\n msg = ('Keyword %s does not exist in %s: Options are '\n '%s' % (self.get_name(), self.keywords.keys()))\n raise Exception(msg)\n\n def get_caption(self):\n \"\"\"Return 'caption' keyword if present. Otherwise ''.\n \"\"\"\n if 'caption' in self.keywords:\n return self.keywords['caption']\n else:\n return ''\n\n def read_from_file(self, filename):\n\n # Open data file for reading\n # File must be kept open, otherwise GDAL methods segfault.\n fid = self.fid = gdal.Open(filename, gdal.GA_ReadOnly)\n if fid is None:\n msg = 'Could not open file %s' % filename\n raise Exception(msg)\n\n # Record raster metadata from file\n basename, ext = os.path.splitext(filename)\n\n # If file is ASCII, check that projection is around.\n # GDAL does not check this nicely, so it is worth an\n # error message\n if ext == '.asc':\n try:\n open(basename + '.prj')\n except IOError:\n msg = ('Projection file not found for %s. 
You must supply '\n 'a projection file with extension .prj' % filename)\n raise RuntimeError(msg)\n\n # Look for any keywords\n self.keywords = read_keywords(basename + '.keywords')\n\n # Determine name\n if 'title' in self.keywords:\n rastername = self.keywords['title']\n else:\n # Use basename without leading directories as name\n rastername = os.path.split(basename)[-1]\n\n self.name = rastername\n self.filename = filename\n\n self.projection = Projection(self.fid.GetProjection())\n self.geotransform = self.fid.GetGeoTransform()\n self.columns = fid.RasterXSize\n self.rows = fid.RasterYSize\n self.number_of_bands = fid.RasterCount\n\n # Assume that file contains all data in one band\n msg = 'Only one raster band currently allowed'\n if self.number_of_bands > 1:\n msg = ('WARNING: Number of bands in %s are %i. '\n 'Only the first band will currently be '\n 'used.' % (filename, self.number_of_bands))\n # FIXME(Ole): Let us use python warnings here\n raise Exception(msg)\n\n # Get first band.\n band = self.band = fid.GetRasterBand(1)\n if band is None:\n msg = 'Could not read raster band from %s' % filename\n raise Exception(msg)\n\n def write_to_file(self, filename):\n \"\"\"Save raster data to file\n\n Input\n filename: filename with extension .tif\n \"\"\"\n\n # Check file format\n basename, extension = os.path.splitext(filename)\n\n msg = ('Invalid file type for file %s. Only extension '\n 'tif allowed.' % filename)\n assert extension in ['.tif', '.asc'], msg\n format = DRIVER_MAP[extension]\n\n # Get raster data\n A = self.get_data()\n\n # Get Dimensions. Note numpy and Gdal swap order\n N, M = A.shape\n\n # Create empty file.\n # FIXME (Ole): It appears that this is created as single\n # precision even though Float64 is specified\n # - see issue #17\n driver = gdal.GetDriverByName(format)\n fid = driver.Create(filename, M, N, 1, gdal.GDT_Float64)\n if fid is None:\n msg = ('Gdal could not create filename %s using '\n 'format %s' % (filename, format))\n raise Exception(msg)\n\n # Write metada\n fid.SetProjection(str(self.projection))\n fid.SetGeoTransform(self.geotransform)\n\n # Write data\n fid.GetRasterBand(1).WriteArray(A)\n\n # Write keywords if any\n write_keywords(self.keywords, basename + '.keywords')\n\n def interpolate(self, X, name=None):\n \"\"\"Interpolate values of this raster layer to other layer\n\n Input\n X: Layer object defining target\n name: Optional name of interpolated layer.\n If name is None, the name of self is used.\n\n Output\n Y: Layer object with values of this raster layer interpolated to\n geometry of input layer X\n\n Note: If target geometry is polygon, data will be interpolated to\n its centroids and the output is a point data set.\n \"\"\"\n\n if X.is_raster:\n if self.get_geotransform() != X.get_geotransform():\n # Need interpolation between grids\n msg = 'Intergrid interpolation not yet implemented'\n raise Exception(msg)\n else:\n # Rasters are aligned, no need to interpolate\n return self\n else:\n # Interpolate this raster layer to geometry of X\n return interpolate_raster_vector(self, X, name)\n\n def get_data(self, nan=True, scaling=None):\n \"\"\"Get raster data as numeric array\n\n Input\n nan: Optional flag controlling handling of missing values.\n If nan is True (default), nodata values will be replaced\n with numpy.nan\n If keyword nan has a numeric value, nodata values will\n be replaced by that value. E.g. 
to set missing values to 0,\n do get_data(nan=0.0)\n scaling: Optional flag controlling if data is to be scaled\n if it has been resampled. Admissible values are\n False: data is retrieved without modification.\n True: Data is rescaled based on the squared ratio between\n its current and native resolution. This is typically\n required if raster data represents a density\n such as population per km^2\n None: The behaviour will depend on the keyword \"density\"\n associated with the layer. If density is \"true\" or\n \"yes\" (ignoring case), scaling will be applied\n otherwise not. This is the default.\n scalar value: If scaling takes a numerical scalar value,\n that will be use to scale the data\n\n \"\"\"\n\n # FIXME (Ole): Once we have the ability to use numpy.nan throughout,\n # make that the default and name everything better\n\n if hasattr(self, 'data'):\n A = self.data\n assert A.shape[0] == self.rows and A.shape[1] == self.columns\n else:\n # Read from raster file\n A = self.band.ReadAsArray()\n\n M, N = A.shape\n msg = ('Dimensions of raster array do not match those of '\n 'raster file %s' % self.filename)\n assert M == self.rows, msg\n assert N == self.columns, msg\n\n if nan is False:\n pass\n else:\n if nan is True:\n NAN = numpy.nan\n else:\n NAN = nan\n\n # Replace NODATA_VALUE with NaN\n nodata = self.get_nodata_value()\n\n NaN = numpy.ones(A.shape, A.dtype) * NAN\n A = numpy.where(A == nodata, NaN, A)\n\n # Take care of possible scaling\n if scaling is None:\n # Redefine scaling from density keyword if possible\n kw = self.get_keywords()\n if 'density' in kw and kw['density'].lower() in ['true', 'yes']:\n scaling = True\n else:\n scaling = False\n\n if scaling is False:\n # No change\n sigma = 1\n elif scaling is True:\n # Calculate scaling based on resolution change\n\n actual_res = self.get_resolution(isotropic=True)\n native_res = self.get_resolution(isotropic=True, native=True)\n sigma = (actual_res / native_res) ** 2\n else:\n # See if scaling can work as a scalar value\n try:\n sigma = float(scaling)\n except Exception, e:\n msg = ('Keyword scaling \"%s\" could not be converted to a '\n 'number. It must be either True, False, None or a '\n 'number: %s' % (scaling, str(e)))\n raise Exception(msg)\n\n # Return possibly scaled data\n return sigma * A\n\n def get_projection(self, proj4=False):\n \"\"\"Return projection of this layer as a string.\n \"\"\"\n return self.projection.get_projection(proj4)\n\n def get_geotransform(self):\n \"\"\"Return geotransform for this raster layer\n\n Output\n geotransform: 6 digit vector\n (top left x, w-e pixel resolution, rotation,\n top left y, rotation, n-s pixel resolution).\n\n See e.g. http://www.gdal.org/gdal_tutorial.html\n \"\"\"\n\n return self.geotransform\n\n def get_geometry(self):\n \"\"\"Return longitudes and latitudes (the axes) for grid.\n\n Return two vectors (longitudes and latitudes) corresponding to\n grid. The values are offset by half a pixel size to correspond to\n pixel registration.\n\n I.e. If the grid origin (top left corner) is (105, 10) and the\n resolution is 1 degrees in each direction, then the vectors will\n take the form\n\n longitudes = [100.5, 101.5, ..., 109.5]\n latitudes = [0.5, 1.5, ..., 9.5]\n \"\"\"\n\n # Get parameters for axes\n g = self.get_geotransform()\n\n lon_ul = g[0] # Longitude of upper left corner\n lat_ul = g[3] # Latitude of upper left corner\n dx = g[1] # Longitudinal resolution\n dy = - g[5] # Latitudinal resolution (always(?) 
negative)\n nx = self.columns\n ny = self.rows\n\n assert dx > 0\n assert dy > 0\n\n # Coordinates of lower left corner\n lon_ll = lon_ul\n lat_ll = lat_ul - ny * dy\n\n # Coordinates of upper right corner\n lon_ur = lon_ul + nx * dx\n\n # Define pixel centers along each directions\n dy2 = dy / 2\n dx2 = dx / 2\n\n # Define longitudes and latitudes for each axes\n x = numpy.linspace(lon_ll + dx2,\n lon_ur - dx2, nx)\n y = numpy.linspace(lat_ll + dy2,\n lat_ul - dy2, ny)\n\n # Return\n return x, y\n\n def __mul__(self, other):\n return self.get_data() * other.get_data()\n\n def __add__(self, other):\n return self.get_data() + other.get_data()\n\n def get_extrema(self):\n \"\"\"Get min and max from raster\n If raster has a nominated no_data value, this is ignored.\n\n Return min, max\n \"\"\"\n\n A = self.get_data(nan=True)\n min = numpy.nanmin(A.flat[:])\n max = numpy.nanmax(A.flat[:])\n\n return min, max\n\n def get_nodata_value(self):\n \"\"\"Get the internal representation of NODATA\n\n If the internal value is None, the standard -9999 is assumed\n \"\"\"\n\n if hasattr(self, 'band'):\n nodata = self.band.GetNoDataValue()\n else:\n nodata = None\n\n # Use common default in case nodata was not registered in raster file\n if nodata is None:\n nodata = -9999\n\n return nodata\n\n def get_bins(self, N=10, quantiles=False):\n \"\"\"Get N values between the min and the max occurred in this dataset.\n\n Return sorted list of length N+1 where the first element is min and\n the last is max. Intermediate values depend on the keyword quantiles:\n If quantiles is True, they represent boundaries between quantiles.\n If quantiles is False, they represent equidistant interval boundaries.\n \"\"\"\n\n min, max = self.get_extrema()\n\n levels = []\n if quantiles is False:\n # Linear intervals\n d = (max - min) / N\n\n for i in range(N):\n levels.append(min + i * d)\n else:\n # Quantiles\n # FIXME (Ole): Not 100% sure about this algorithm,\n # but it is close enough\n\n A = self.get_data(nan=True).flat[:]\n\n mask = numpy.logical_not(numpy.isnan(A)) # Omit NaN's\n A = A.compress(mask)\n\n A.sort()\n\n assert len(A) == A.shape[0]\n\n d = float(len(A) + 0.5) / N\n for i in range(N):\n levels.append(A[int(i * d)])\n\n levels.append(max)\n\n return levels\n\n def get_bounding_box(self):\n \"\"\"Get bounding box coordinates for raster layer\n\n Format is [West, South, East, North]\n \"\"\"\n\n return geotransform2bbox(self.geotransform, self.columns, self.rows)\n\n def get_resolution(self, isotropic=False, native=False):\n \"\"\"Get raster resolution as a 2-tuple (resx, resy)\n\n Input\n isotropic: If True, verify that dx == dy and return dx\n If False return 2-tuple (dx, dy)\n native: Optional flag. If True, return native resolution if\n available. 
Otherwise return actual.\n \"\"\"\n\n # Get actual resolution first\n try:\n res = geotransform2resolution(self.geotransform,\n isotropic=isotropic)\n except Exception, e:\n msg = ('Resolution for layer %s could not be obtained: %s '\n % (self.get_name(), str(e)))\n raise Exception(msg)\n\n if native:\n keywords = self.get_keywords()\n if 'resolution' in keywords:\n # Clunky but works - see issue #171\n res = float(keywords['resolution'])\n if not isotropic:\n res = (res, res)\n\n return res\n\n @property\n def is_raster(self):\n return True\n\n @property\n def is_vector(self):\n return False\n" }, { "alpha_fraction": 0.5488215684890747, "alphanum_fraction": 0.5690235495567322, "avg_line_length": 26, "blob_id": "a408082d0189ef3d16bd9e6b31907ccc06f0f284", "content_id": "1a0182910d743d520abbf858be0d7e5fd8147fcd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 297, "license_type": "no_license", "max_line_length": 69, "num_lines": 11, "path": "/risiko/__init__.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "\"\"\"Risk In a Box.\"\"\"\n\n# Define package meta data\nVERSION = (0, 4, 0)\n\n__version__ = '.'.join(map(str, VERSION[0:3])) + ''.join(VERSION[3:])\n__author__ = 'Ole Nielsen, Ted Dunstone, Ariel Nunez'\n__contact__ = '[email protected]'\n__homepage__ = 'http://riskinabox.org/'\n__docformat__ = 'restructuredtext'\n__license__ = 'GPL'\n" }, { "alpha_fraction": 0.7074074149131775, "alphanum_fraction": 0.7111111283302307, "avg_line_length": 32.75, "blob_id": "f99651eaac59dd0bdd24a78c5a6ce13e20cda13f", "content_id": "c340808a1c5c41f5f2f2450cd107402c14069d53", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 270, "license_type": "no_license", "max_line_length": 77, "num_lines": 8, "path": "/calculator/app/main.js", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "#!/usr/bin/env ringo\n\n// main script to start application\n// when run from the command line, assume debug mode (scripts not compressed)\nif (require.main == module) {\n java.lang.System.setProperty(\"app.debug\", 1);\n require(\"ringo/webapp\").main(module.directory);\n}\n" }, { "alpha_fraction": 0.5927797555923462, "alphanum_fraction": 0.6043321490287781, "avg_line_length": 26.700000762939453, "blob_id": "aa5cdefd9e35c198fdb3d226c0cb00956a8e2ae5", "content_id": "fdba4d4900995138c1d651c92e9f40e57c322a80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1385, "license_type": "no_license", "max_line_length": 67, "num_lines": 50, "path": "/impact/tests/plugins/empirical_fatality_model.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "from impact.plugins.core import FunctionProvider\nfrom impact.storage.raster import Raster\n\nimport scipy\nimport scipy.stats\nimport numpy\n\n\nclass EmpiricalFatalityFunction(FunctionProvider):\n \"\"\"Risk plugin for earthquake damage based on empirical results\n\n :author Hadi Ghasemi\n :rating 2\n\n :param requires category=='doesnotexist'\n :param requires title=='neverwas'\n \"\"\"\n\n @staticmethod\n def run(layers,\n teta=14.05, beta=0.17, zeta=2.15):\n \"\"\"Risk plugin for earthquake fatalities\n\n Input\n H: Numerical array of hazard data\n E: Numerical array of exposure data\n \"\"\"\n\n # Identify input layers\n intensity = layers[0]\n population = layers[1]\n\n # Extract data\n H = intensity.get_data(nan=0)\n P = population.get_data(nan=0)\n\n # Calculate impact\n 
logHazard = 1 / beta * scipy.log(H / teta)\n\n # Convert array to be standard floats expected by cdf\n arrayout = numpy.array([[float(value) for value in row]\n for row in logHazard])\n F = scipy.stats.norm.cdf(arrayout * P)\n\n # Create new layer and return\n R = Raster(F,\n projection=population.get_projection(),\n geotransform=population.get_geotransform(),\n name='Estimated fatalities')\n return R\n" }, { "alpha_fraction": 0.6842105388641357, "alphanum_fraction": 0.6842105388641357, "avg_line_length": 27, "blob_id": "0887ebc9659752b0d7b81dfd85caa31b54b278a1", "content_id": "f639aed7a015c3cf92616a1856c7a065c779e8f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 57, "license_type": "no_license", "max_line_length": 32, "num_lines": 2, "path": "/docs/usage/index.rst", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": ".. include:: usage/basic_install\n.. include:: usage/faq\n\n" }, { "alpha_fraction": 0.5693243741989136, "alphanum_fraction": 0.5822455286979675, "avg_line_length": 27.88662338256836, "blob_id": "ea9418eed1bdd5f352353e16786b92ae3c3cfbab", "content_id": "921ac9da6be9642ddc0548fb91ff0d36d28cdce6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22676, "license_type": "no_license", "max_line_length": 78, "num_lines": 785, "path": "/impact/storage/utilities.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "\"\"\"Utilities for impact.storage\n\"\"\"\n\nimport os\nimport copy\nimport numpy\nfrom osgeo import ogr\nfrom tempfile import mkstemp\nfrom urllib2 import urlopen\n\n# Spatial layer file extensions that are recognised in Risiko\n# FIXME: Perhaps add '.gml', '.zip', ...\nLAYER_TYPES = ['.shp', '.asc', '.tif', '.tiff', '.geotif', '.geotiff']\n\n# Map between extensions and ORG drivers\nDRIVER_MAP = {'.shp': 'ESRI Shapefile',\n '.gml': 'GML',\n '.tif': 'GTiff',\n '.asc': 'AAIGrid'}\n\n# Map between Python types and OGR field types\n# FIXME (Ole): I can't find a double precision type for OGR\nTYPE_MAP = {type(None): ogr.OFTString, # What else should this be?\n type(''): ogr.OFTString,\n type(0): ogr.OFTInteger,\n type(0.0): ogr.OFTReal,\n type(numpy.array([0.0])[0]): ogr.OFTReal, # numpy.float64\n type(numpy.array([[0.0]])[0]): ogr.OFTReal} # numpy.ndarray\n\n# Templates for downloading layers through rest\nWCS_TEMPLATE = '%s?version=1.0.0' + \\\n '&service=wcs&request=getcoverage&format=GeoTIFF&' + \\\n 'store=false&coverage=%s&crs=EPSG:4326&bbox=%s' + \\\n '&resx=%s&resy=%s'\n\nWFS_TEMPLATE = '%s?service=WFS&version=1.0.0' + \\\n '&request=GetFeature&typeName=%s' + \\\n '&outputFormat=SHAPE-ZIP&bbox=%s'\n\n\n# Miscellaneous auxiliary functions\ndef unique_filename(**kwargs):\n \"\"\"Create new filename guaranteed not to exist previoously\n\n Use mkstemp to create the file, then remove it and return the name\n\n See http://docs.python.org/library/tempfile.html for details.\n \"\"\"\n\n _, filename = mkstemp(**kwargs)\n\n try:\n os.remove(filename)\n except:\n pass\n\n return filename\n\n\ndef truncate_field_names(data, n=10):\n \"\"\"Truncate field names to fixed width\n\n Input\n data: List of dictionary with names as keys. 
Can also be None.\n n: Max number of characters allowed\n\n Output\n dictionary with same values as data but with keys truncated\n\n THIS IS OBSOLETE AFTER OGR'S OWN FIELD NAME LAUNDERER IS USED\n \"\"\"\n\n if data is None:\n return None\n\n N = len(data)\n\n # Check if truncation is needed\n need_to_truncate = False\n for key in data[0]:\n if len(key) > n:\n need_to_truncate = True\n\n if not need_to_truncate:\n return data\n\n # Go ahead and truncate attribute table for every entry\n new = []\n for i in range(N):\n D = {} # New dictionary\n for key in data[i]:\n x = key[:n]\n if x in D:\n msg = ('Truncated attribute name %s is duplicated: %s ' %\n (key, str(D.keys())))\n raise Exception(msg)\n\n D[x] = data[i][key]\n\n new.append(D)\n\n return new\n\n\"\"\" FIXME: The truncation method can be replaced with something like this\n\n>>> from osgeo import ogr\n>>> from osgeo import osr\n>>> drv = ogr.GetDriverByName('ESRI Shapefile')\n>>> ds = drv.CreateDataSource('shptest.shp')\n>>> lyr = ds.CreateLayer('mylyr', osr.SpatialReference(), ogr.wkbPolygon)\n>>> fd = ogr.FieldDefn('A slightly long name', ogr.OFTString)\n>>> lyr.CreateField(fd)\nWarning 6: Normalized/laundered field name: 'A slightly long name'\nto 'A slightly'\n0\n>>> layer_defn = lyr.GetLayerDefn()\n>>> last_field_idx = layer_defn.GetFieldCount() - 1\n>>> real_field_name = layer_defn.GetFieldDefn(last_field_idx).GetNameRef()\n>>> feature = ogr.Feature(layer_defn)\n>>> feature.SetField('A slightly', 'value')\n>>> real_field_name\n'A slightly'\n\"\"\"\n\n\"\"\"To suppress Warning 6:\n\nYes, you can surround the CreateField() call with :\n\ngdal.PushErrorHandler('CPLQuietErrorHandler')\n...\ngdal.PopErrorHandler()\n\n\n\"\"\"\n\n\n# GeoServer utility functions\ndef is_server_reachable(url):\n \"\"\"Make an http connection to url to see if it is accesible.\n\n Returns boolean\n \"\"\"\n try:\n urlopen(url)\n except Exception:\n return False\n else:\n return True\n\n\ndef write_keywords(keywords, filename):\n \"\"\"Write keywords dictonary to file\n\n Input\n keywords: Dictionary of keyword, value pairs\n filename: Name of keywords file. Extension expected to be .keywords\n\n Keys must be strings\n Values must be strings or None.\n\n If value is None, only the key will be written. Otherwise key, value pairs\n will be written as key: value\n\n Trailing or preceding whitespace will be ignored.\n \"\"\"\n\n # Input checks\n basename, ext = os.path.splitext(filename)\n\n # FIXME (Ole): Why don't we just pass in the filename and let\n # this function decide the extension?\n msg = ('Unknown extension for file %s. '\n 'Expected %s.keywords' % (filename, basename))\n assert ext == '.keywords', msg\n\n # Write\n fid = open(filename, 'w')\n for k, v in keywords.items():\n\n msg = ('Key in keywords dictionary must be a string. '\n 'I got %s with type %s' % (k, str(type(k))[1:-1]))\n assert isinstance(k, basestring), msg\n\n key = k.strip()\n\n msg = ('Key in keywords dictionary must not contain the \":\" '\n 'character. I got \"%s\"' % key)\n assert ':' not in key, msg\n\n if v is None:\n fid.write('%s\\n' % key)\n else:\n msg = ('Keyword value must be a string. '\n 'For key %s, I got %s with type %s'\n % (k, v, str(type(v))[1:-1]))\n assert isinstance(v, basestring), msg\n\n val = v.strip()\n\n msg = ('Value in keywords dictionary must be a string or None. '\n 'I got %s with type %s' % (val, type(val)))\n assert isinstance(val, basestring), msg\n\n msg = ('Value must not contain the \":\" character. 
'\n 'I got \"%s\"' % val)\n assert ':' not in val, msg\n\n # FIXME (Ole): Have to remove commas (issue #148)\n val = val.replace(',', '')\n\n fid.write('%s: %s\\n' % (key, val))\n fid.close()\n\n\ndef read_keywords(filename):\n \"\"\"Read keywords dictonary from file\n\n Input\n filename: Name of keywords file. Extension expected to be .keywords\n The format of one line is expected to be either\n string: string\n or\n string\n Output\n keywords: Dictionary of keyword, value pairs\n\n \"\"\"\n\n # Input checks\n basename, ext = os.path.splitext(filename)\n\n msg = ('Unknown extension for file %s. '\n 'Expected %s.keywords' % (filename, basename))\n assert ext == '.keywords', msg\n\n if not os.path.isfile(filename):\n return {}\n\n # Read\n keywords = {}\n fid = open(filename, 'r')\n for line in fid.readlines():\n text = line.strip()\n if text == '':\n continue\n\n fields = text.split(':')\n\n msg = ('Keyword must be either \"string\" or \"string: string\". '\n 'I got %s ' % text)\n assert len(fields) in [1, 2], msg\n\n key = fields[0].strip()\n\n if len(fields) == 2:\n val = fields[1].strip()\n else:\n val = None\n\n keywords[key] = val # .replace(' ', '_')\n fid.close()\n\n return keywords\n\n\ndef extract_geotransform(layer):\n \"\"\"Extract geotransform from OWS layer object.\n\n Input\n layer: Raster layer object e.g. obtained from WebCoverageService\n\n Output:\n geotransform: GDAL geotransform (www.gdal.org/gdal_tutorial.html)\n \"\"\"\n\n grid = layer.grid\n\n top_left_x = float(grid.origin[0])\n we_pixel_res = float(grid.offsetvectors[0][0])\n x_rotation = float(grid.offsetvectors[0][1])\n top_left_y = float(grid.origin[1])\n y_rotation = float(grid.offsetvectors[1][0])\n ns_pixel_res = float(grid.offsetvectors[1][1])\n\n # There is half a pixel_resolution difference between\n # what WCS reports and what GDAL reports.\n # A pixel CENTER vs pixel CORNER difference.\n adjusted_top_left_x = top_left_x - we_pixel_res / 2\n adjusted_top_left_y = top_left_y - ns_pixel_res / 2\n\n return (adjusted_top_left_x, we_pixel_res, x_rotation,\n adjusted_top_left_y, y_rotation, ns_pixel_res)\n\n\ndef geotransform2bbox(geotransform, columns, rows):\n \"\"\"Convert geotransform to bounding box\n\n Input\n geotransform: GDAL geotransform (6-tuple).\n (top left x, w-e pixel resolution, rotation,\n top left y, rotation, n-s pixel resolution).\n See e.g. http://www.gdal.org/gdal_tutorial.html\n columns: Number of columns in grid\n rows: Number of rows in grid\n\n Output\n bbox: Bounding box as a list of geographic coordinates\n [west, south, east, north]\n\n Rows and columns are needed to determine eastern and northern bounds.\n FIXME: Not sure if the pixel vs gridline registration issue is observed\n correctly here. Need to check against gdal > v1.7\n \"\"\"\n\n x_origin = geotransform[0] # top left x\n y_origin = geotransform[3] # top left y\n x_res = geotransform[1] # w-e pixel resolution\n y_res = geotransform[5] # n-s pixel resolution\n x_pix = columns\n y_pix = rows\n\n minx = x_origin\n maxx = x_origin + (x_pix * x_res)\n miny = y_origin + (y_pix * y_res)\n maxy = y_origin\n\n return [minx, miny, maxx, maxy]\n\n\ndef geotransform2resolution(geotransform, isotropic=False,\n # FIXME (Ole): Check these tolerances (issue #173)\n rtol=5.0e-2, atol=1.0e-2):\n \"\"\"Convert geotransform to resolution\n\n Input\n geotransform: GDAL geotransform (6-tuple).\n (top left x, w-e pixel resolution, rotation,\n top left y, rotation, n-s pixel resolution).\n See e.g. 
http://www.gdal.org/gdal_tutorial.html\n Input\n isotropic: If True, verify that dx == dy and return dx\n If False (default) return 2-tuple (dx, dy)\n rtol, atol: Used to control how close dx and dy must be\n to quality for isotropic. These are passed on to\n numpy.allclose for comparison.\n\n Output\n resolution: grid spacing (resx, resy) in (positive) decimal\n degrees ordered as longitude first, then latitude.\n or resx (if isotropic is True)\n \"\"\"\n\n resx = geotransform[1] # w-e pixel resolution\n resy = - geotransform[5] # n-s pixel resolution (always negative)\n\n if isotropic:\n msg = ('Resolution requested with '\n 'isotropic=True, but '\n 'resolutions in the horizontal and vertical '\n 'are different: resx = %.12f, resy = %.12f. '\n % (resx, resy))\n assert numpy.allclose(resx, resy,\n rtol=rtol, atol=atol), msg\n\n return resx\n else:\n return resx, resy\n\n\ndef bbox_intersection(*args):\n \"\"\"Compute intersection between two or more bounding boxes\n\n Input\n args: two or more bounding boxes.\n Each is assumed to be a list or a tuple with\n four coordinates (W, S, E, N)\n\n Output\n result: The minimal common bounding box\n\n \"\"\"\n\n msg = 'Function bbox_intersection must take at least 2 arguments.'\n assert len(args) > 1, msg\n\n result = [-180, -90, 180, 90]\n for a in args:\n msg = ('Bounding box expected to be a list of the '\n 'form [W, S, E, N]. '\n 'Instead i got \"%s\"' % str(a))\n try:\n box = list(a)\n except:\n raise Exception(msg)\n\n assert len(box) == 4, msg\n\n msg = 'Western boundary must be less than eastern. I got %s' % box\n assert box[0] < box[2], msg\n\n msg = 'Southern boundary must be less than northern. I got %s' % box\n assert box[1] < box[3], msg\n\n # Compute intersection\n\n # West and South\n for i in [0, 1]:\n result[i] = max(result[i], box[i])\n\n # East and North\n for i in [2, 3]:\n result[i] = min(result[i], box[i])\n\n # Check validity and return\n if result[0] < result[2] and result[1] < result[3]:\n return result\n else:\n return None\n\n\ndef minimal_bounding_box(bbox, min_res, eps=1.0e-6):\n \"\"\"Grow bounding box to exceed specified resolution if needed\n\n Input\n bbox: Bounding box with format [W, S, E, N]\n min_res: Minimal acceptable resolution to exceed\n eps: Optional tolerance that will be applied to 'buffer' result\n\n Ouput\n Adjusted bounding box guaranteed to exceed specified resolution\n \"\"\"\n\n # FIXME (Ole): Probably obsolete now\n\n bbox = copy.copy(list(bbox))\n\n delta_x = bbox[2] - bbox[0]\n delta_y = bbox[3] - bbox[1]\n\n if delta_x < min_res:\n dx = (min_res - delta_x) / 2 + eps\n bbox[0] -= dx\n bbox[2] += dx\n\n if delta_y < min_res:\n dy = (min_res - delta_y) / 2 + eps\n bbox[1] -= dy\n bbox[3] += dy\n\n return bbox\n\n\ndef buffered_bounding_box(bbox, resolution):\n \"\"\"Grow bounding box with one unit of resolution in each direction\n\n\n This will ensure there is enough pixels to robustly provide\n interpolated values without having to painstakingly deal with\n all corner cases such as 1 x 1, 1 x 2 and 2 x 1 arrays.\n\n The border will also make sure that points that would otherwise fall\n outside the domain (as defined by a tight bounding box) get assigned\n values.\n\n Input\n bbox: Bounding box with format [W, S, E, N]\n resolution: (resx, resy) - Raster resolution in each direction.\n res - Raster resolution in either direction\n If resolution is None bbox is returned unchanged.\n\n Ouput\n Adjusted bounding box\n\n\n Case in point: Interpolation point O would fall outside this domain\n even 
though there are enough grid points to support it\n\n --------------\n | |\n | * * | * *\n | O|\n | |\n | * * | * *\n --------------\n \"\"\"\n\n bbox = copy.copy(list(bbox))\n\n if resolution is None:\n return bbox\n\n try:\n resx, resy = resolution\n except:\n resx = resy = resolution\n\n bbox[0] -= resx\n bbox[1] -= resy\n bbox[2] += resx\n bbox[3] += resy\n\n return bbox\n\n\ndef get_geometry_type(geometry):\n \"\"\"Determine geometry type based on data\n\n Input\n geometry: A list of either point coordinates [lon, lat] or polygons\n which are assumed to be numpy arrays of coordinates\n\n Output\n geometry_type: Either ogr.wkbPoint or ogr.wkbPolygon\n\n If geometry type cannot be determined an Exception is raised.\n\n Note, there is no consistency check across all entries of the\n geometry list, only the first element is used in this determination.\n \"\"\"\n\n msg = 'Argument geometry must be a sequence. I got %s ' % type(geometry)\n assert is_sequence(geometry), msg\n\n msg = ('The first element in geometry must be a sequence of length > 2. '\n 'I got %s ' % str(geometry[0]))\n assert is_sequence(geometry[0]), msg\n assert len(geometry[0]) >= 2, msg\n\n if len(geometry[0]) == 2:\n try:\n float(geometry[0][0])\n float(geometry[0][1])\n except:\n pass\n else:\n # This geometry appears to be point data\n geometry_type = ogr.wkbPoint\n elif len(geometry[0]) > 2:\n try:\n x = numpy.array(geometry[0])\n except:\n pass\n else:\n # This geometry appears to be polygon data\n if x.shape[0] > 2 and x.shape[1] == 2:\n geometry_type = ogr.wkbPolygon\n\n if geometry_type is None:\n msg = 'Could not determine geometry type'\n raise Exception(msg)\n\n return geometry_type\n\n\ndef is_sequence(x):\n \"\"\"Determine if x behaves like a true sequence but not a string\n\n This will for example return True for lists, tuples and numpy arrays\n but False for strings and dictionaries.\n \"\"\"\n\n if isinstance(x, basestring):\n return False\n\n try:\n x[0]\n except:\n return False\n else:\n return True\n\n\ndef array2wkt(A, geom_type='POLYGON'):\n \"\"\"Convert coordinates to wkt format\n\n Input\n A: Nx2 Array of coordinates representing either a polygon or a line.\n A can be either a numpy array or a list of coordinates.\n geom_type: Determines output keyword 'POLYGON' or 'LINESTRING'\n\n Output\n wkt: geometry in the format known to ogr: Examples\n\n POLYGON((1020 1030,1020 1045,1050 1045,1050 1030,1020 1030))\n LINESTRING(1000 1000, 1100 1050)\n\n \"\"\"\n\n if geom_type == 'LINESTRING':\n # One bracket\n n = 1\n elif geom_type == 'POLYGON':\n # Two brackets (tsk tsk)\n n = 2\n else:\n msg = 'Unknown geom_type: %s' % geom_type\n raise Exception(msg)\n\n wkt_string = geom_type + '(' * n\n\n N = len(A)\n for i in range(N):\n # Works for both lists and arrays\n wkt_string += '%f %f, ' % tuple(A[i])\n\n return wkt_string[:-2] + ')' * n\n\n# Map of ogr numerical geometry types to their textual representation\n# FIXME (Ole): Some of them don't exist, even though they show up\n# when doing dir(ogr) - Why?:\ngeometry_type_map = {ogr.wkbPoint: 'Point',\n ogr.wkbPoint25D: 'Point25D',\n ogr.wkbPolygon: 'Polygon',\n ogr.wkbPolygon25D: 'Polygon25D',\n #ogr.wkbLinePoint: 'LinePoint', # ??\n ogr.wkbGeometryCollection: 'GeometryCollection',\n ogr.wkbGeometryCollection25D: 'GeometryCollection25D',\n ogr.wkbLineString: 'LineString',\n ogr.wkbLineString25D: 'LineString25D',\n ogr.wkbLinearRing: 'LinearRing',\n ogr.wkbMultiLineString: 'MultiLineString',\n ogr.wkbMultiLineString25D: 'MultiLineString25D',\n 
ogr.wkbMultiPoint: 'MultiPoint',\n ogr.wkbMultiPoint25D: 'MultiPoint25D',\n ogr.wkbMultiPolygon: 'MultiPolygon',\n ogr.wkbMultiPolygon25D: 'MultiPolygon25D',\n ogr.wkbNDR: 'NDR',\n ogr.wkbNone: 'None',\n ogr.wkbUnknown: 'Unknown'}\n\n\ndef geometrytype2string(g_type):\n \"\"\"Provides string representation of numeric geometry types\n\n FIXME (Ole): I can't find anything like this in ORG. Why?\n \"\"\"\n\n if g_type in geometry_type_map:\n return geometry_type_map[g_type]\n elif g_type is None:\n return 'No geometry type assigned'\n else:\n return 'Unknown geometry type: %s' % str(g_type)\n\n\ndef calculate_polygon_area(polygon, signed=False):\n \"\"\"Calculate the signed area of non-self-intersecting polygon\n\n Input\n polygon: Numeric array of points (longitude, latitude). It is assumed\n to be closed, i.e. first and last points are identical\n signed: Optional flag deciding whether returned area retains its sign:\n If points are ordered counter clockwise, the signed area\n will be positive.\n If points are ordered clockwise, it will be negative\n Default is False which means that the area is always positive.\n\n Output\n area: Area of polygon (subject to the value of argument signed)\n\n Sources\n http://paulbourke.net/geometry/polyarea/\n http://en.wikipedia.org/wiki/Centroid\n \"\"\"\n\n # Make sure it is numeric\n P = numpy.array(polygon)\n\n msg = ('Polygon is assumed to consist of coordinate pairs. '\n 'I got second dimension %i instead of 2' % P.shape[1])\n assert P.shape[1] == 2, msg\n\n x = P[:, 0]\n y = P[:, 1]\n\n # Calculate 0.5 sum_{i=0}^{N-1} (x_i y_{i+1} - x_{i+1} y_i)\n a = x[:-1] * y[1:]\n b = y[:-1] * x[1:]\n\n A = numpy.sum(a - b) / 2.\n\n if signed:\n return A\n else:\n return abs(A)\n\n\ndef calculate_polygon_centroid(polygon):\n \"\"\"Calculate the centroid of non-self-intersecting polygon\n\n Input\n polygon: Numeric array of points (longitude, latitude). It is assumed\n to be closed, i.e. first and last points are identical\n\n Sources\n http://paulbourke.net/geometry/polyarea/\n http://en.wikipedia.org/wiki/Centroid\n \"\"\"\n\n # Make sure it is numeric\n P = numpy.array(polygon)\n\n # Normalise to ensure numerical accurracy.\n # This requirement in backed by tests in test_io.py and without it\n # centroids at building footprint level may get shifted outside the\n # polygon!\n P_origin = numpy.amin(P, axis=0)\n P = P - P_origin\n\n # Get area. This calculation could be incorporated to save time\n # if necessary as the two formulas are very similar.\n A = calculate_polygon_area(polygon, signed=True)\n\n x = P[:, 0]\n y = P[:, 1]\n\n # Calculate\n #Cx = sum_{i=0}^{N-1} (x_i + x_{i+1})(x_i y_{i+1} - x_{i+1} y_i)/(6A)\n\n # Calculate\n # Cy = sum_{i=0}^{N-1} (y_i + y_{i+1})(x_i y_{i+1} - x_{i+1} y_i)/(6A)\n a = x[:-1] * y[1:]\n b = y[:-1] * x[1:]\n\n cx = x[:-1] + x[1:]\n cy = y[:-1] + y[1:]\n\n Cx = numpy.sum(cx * (a - b)) / (6. * A)\n Cy = numpy.sum(cy * (a - b)) / (6. 
* A)\n\n # Translate back to real location\n C = numpy.array([Cx, Cy]) + P_origin\n return C\n\n\ndef titelize(s):\n \"\"\"Convert string into title\n\n This is better than the built-in method title() because\n it leaves all uppercase words like UK unchanged.\n\n Source http://stackoverflow.com/questions/1549641/\n how-to-capitalize-the-first-letter-of-each-word-in-a-string-python\n \"\"\"\n\n # Replace underscores with spaces\n s = s.replace('_', ' ')\n\n # Capitalise\n #s = s.title() # This will capitalize first letter force the rest down\n s = ' '.join([w[0].upper() + w[1:] for w in s.split(' ')])\n\n return s\n\n\ndef nanallclose(x, y, rtol=1.0e-5, atol=1.0e-8):\n \"\"\"Numpy allclose function which allows NaN\n\n Input\n x, y: Either scalars or numpy arrays\n\n Output\n True or False\n\n Returns True if all non-nan elements pass.\n \"\"\"\n\n xn = numpy.isnan(x)\n yn = numpy.isnan(y)\n if numpy.any(xn != yn):\n # Presence of NaNs is not the same in x and y\n return False\n\n if numpy.all(xn):\n # Everything is NaN.\n # This will also take care of x and y being NaN scalars\n return True\n\n # Filter NaN's out\n if numpy.any(xn):\n x = x[-xn]\n y = y[-yn]\n\n # Compare non NaN's and return\n return numpy.allclose(x, y, rtol=rtol, atol=atol)\n" }, { "alpha_fraction": 0.609387993812561, "alphanum_fraction": 0.613863468170166, "avg_line_length": 35.12030029296875, "blob_id": "c663ce50c7d1908027ca3e0ee998efc43c01bd1d", "content_id": "3d73b6df040ed276861e04197e9241b740518aac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9608, "license_type": "no_license", "max_line_length": 79, "num_lines": 266, "path": "/impact/engine/core.py", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "\"\"\"Computational engine for Risk in a Box core.\n\nProvides the function calculate_impact()\n\"\"\"\n\nimport sys\nimport numpy\n\nfrom impact.storage.projection import Projection\nfrom impact.storage.projection import DEFAULT_PROJECTION\nfrom impact.storage.utilities import unique_filename\nfrom impact.storage.utilities import bbox_intersection\nfrom impact.storage.utilities import buffered_bounding_box\nfrom impact.storage.io import bboxlist2string, bboxstring2list\nfrom impact.storage.io import check_bbox_string\n\nimport logging\nlogger = logging.getLogger('risiko')\n\ndef calculate_impact(layers, impact_fcn,\n comment=''):\n \"\"\"Calculate impact levels as a function of list of input layers\n\n Input\n FIXME (Ole): For the moment we take only a list with two\n elements containing one hazard level one exposure level\n\n layers: List of Raster and Vector layer objects to be used for analysis\n\n impact_fcn: Function of the form f(layers)\n comment:\n\n Output\n filename of resulting impact layer (GML). Comment is embedded as\n metadata. Filename is generated from input data and date.\n\n Note\n The admissible file types are tif and asc/prj for raster and\n gml or shp for vector data\n\n Assumptions\n 1. All layers are in WGS84 geographic coordinates\n 2. 
Layers are equipped with metadata such as names and categories\n \"\"\"\n\n # Input checks\n check_data_integrity(layers)\n\n # Get an instance of the passed impact_fcn\n impact_function = impact_fcn()\n\n # Pass input layers to plugin\n\n # FIXME (Ole): When issue #21 has been fully implemented, this\n # return value should be a list of layers.\n F = impact_function.run(layers)\n\n # Write result and return filename\n if F.is_raster:\n extension = '.tif'\n # use default style for raster\n else:\n extension = '.shp'\n # use default style for vector\n\n output_filename = unique_filename(suffix=extension)\n F.write_to_file(output_filename)\n\n # Generate style as defined by the impact_function\n style = impact_function.generate_style(F)\n f = open(output_filename.replace(extension, '.sld'), 'w')\n f.write(style)\n f.close()\n\n return output_filename\n\n\ndef check_data_integrity(layer_files):\n \"\"\"Read list of layer files and verify that that they have the same\n projection and georeferencing.\n \"\"\"\n\n # Set default values for projection and geotransform.\n # Enforce DEFAULT (WGS84).\n # Choosing 'None' will use value of first layer.\n reference_projection = Projection(DEFAULT_PROJECTION)\n geotransform = None\n coordinates = None\n\n for layer in layer_files:\n\n # Ensure that projection is consistent across all layers\n if reference_projection is None:\n reference_projection = layer.projection\n else:\n msg = ('Projections in input layer %s is not as expected:\\n'\n 'projection: %s\\n'\n 'default: %s'\n '' % (layer, layer.projection, reference_projection))\n assert reference_projection == layer.projection, msg\n\n # Ensure that geotransform and dimensions is consistent across\n # all *raster* layers\n if layer.is_raster:\n if geotransform is None:\n geotransform = layer.get_geotransform()\n else:\n msg = ('Geotransforms in input raster layers are different: '\n '%s %s' % (geotransform, layer.get_geotransform()))\n # FIXME (Ole): Use high tolerance until we find out\n # why geoserver changes resolution.\n assert numpy.allclose(geotransform,\n layer.get_geotransform(),\n rtol=1.0e-1), msg\n\n # In either case of vector layers, we check that the coordinates\n # are the same\n if layer.is_vector:\n if coordinates is None:\n coordinates = layer.get_geometry()\n else:\n msg = ('Coordinates in input vector layers are different: '\n '%s %s' % (coordinates, layer.get_geometry()))\n assert numpy.allclose(coordinates,\n layer.get_geometry()), msg\n\n msg = ('There are no data points to interpolate to. 
'\n 'Perhaps zoom out or pan to the study area '\n 'and try again')\n assert len(layer) > 0, msg\n\n # Check that arrays are aligned.\n #\n # We have observerd Geoserver resolution changes - see ticket:102\n # https://github.com/AIFDR/riab/issues/102\n #\n # However, both rasters are now downloaded with exactly the same\n # parameters since we have made bbox and resolution variable in ticket:103\n # https://github.com/AIFDR/riab/issues/103\n #\n # So if they are still not aligned, we raise an Exception\n\n # First find the minimum dimensions\n M = N = sys.maxint\n refname = ''\n for layer in layer_files:\n if layer.is_raster:\n if layer.rows < M:\n refname = layer.get_name()\n M = layer.rows\n if layer.columns < N:\n refname = layer.get_name()\n N = layer.columns\n\n # Then check for alignment\n for layer in layer_files:\n if layer.is_raster:\n data = layer.get_data()\n\n msg = ('Rasters are not aligned!\\n'\n 'Raster %s has %i rows but raster %s has %i rows\\n'\n 'Refer to issue #102' % (layer.get_name(),\n layer.rows,\n refname, M))\n assert layer.rows == M, msg\n\n msg = ('Rasters are not aligned!\\n'\n 'Raster %s has %i columns but raster %s has %i columns\\n'\n 'Refer to issue #102' % (layer.get_name(),\n layer.columns,\n refname, N))\n assert layer.columns == N, msg\n\n\ndef get_common_resolution(haz_metadata, exp_metadata):\n \"\"\"Determine common resolution for raster layers\n\n Input\n haz_metadata: Metadata for hazard layer\n exp_metadata: Metadata for exposure layer\n\n Output\n raster_resolution: Common resolution or None (in case of vector layers)\n \"\"\"\n\n # Determine resolution in case of raster layers\n haz_res = exp_res = None\n if haz_metadata['layer_type'] == 'raster':\n haz_res = haz_metadata['resolution']\n\n if exp_metadata['layer_type'] == 'raster':\n exp_res = exp_metadata['resolution']\n\n # Determine common resolution in case of two raster layers\n if haz_res is None or exp_res is None:\n # This means native resolution will be used\n raster_resolution = None\n else:\n # Take the minimum\n resx = min(haz_res[0], exp_res[0])\n resy = min(haz_res[1], exp_res[1])\n\n raster_resolution = (resx, resy)\n\n return raster_resolution\n\n\ndef get_bounding_boxes(haz_metadata, exp_metadata, req_bbox):\n \"\"\"Check and get appropriate bounding boxes for input layers\n\n Input\n haz_metadata: Metadata for hazard layer\n exp_metadata: Metadata for exposure layer\n req_bbox: Bounding box (string) as requested by HTML POST.\n\n Output\n haz_bbox: Bounding box to be used for hazard layer.\n exp_bbox: Bounding box to be used for exposure layer\n imp_bbox: Bounding box to be used for resulting impact layer\n\n Note exp_bbox and imp_bbox are the same and calculated as the\n intersection among hazard, exposure and viewport bounds.\n haz_bbox may be grown by one pixel size in case exposure data\n is vector data to make sure points always can be interpolated\n \"\"\"\n\n # Input checks\n msg = ('Invalid bounding box %s (%s). 
'\n 'It must be a string' % (str(req_bbox), type(req_bbox)))\n assert isinstance(req_bbox, basestring), msg\n check_bbox_string(req_bbox)\n\n # Get bounding boxes for layers and viewport\n haz_bbox = haz_metadata['bounding_box']\n exp_bbox = exp_metadata['bounding_box']\n vpt_bbox = bboxstring2list(req_bbox)\n\n # New bounding box for data common to hazard, exposure and viewport\n # Download only data within this intersection\n intersection_bbox = bbox_intersection(vpt_bbox, haz_bbox, exp_bbox)\n if intersection_bbox is None:\n # Bounding boxes did not overlap\n msg = ('Bounding boxes of hazard data [%s], exposure data [%s] '\n 'and viewport [%s] did not overlap, so no computation was '\n 'done. Please make sure you pan to where the data is and '\n 'that hazard and exposure data overlaps.'\n % (bboxlist2string(haz_bbox, decimals=3),\n bboxlist2string(exp_bbox, decimals=3),\n bboxlist2string(vpt_bbox, decimals=3)))\n logger.info(msg)\n raise Exception(msg)\n\n # Grow hazard bbox to buffer this common bbox in case where\n # hazard is raster and exposure is vector\n if (haz_metadata['layer_type'] == 'raster' and\n exp_metadata['layer_type'] == 'vector'):\n\n haz_res = haz_metadata['resolution']\n haz_bbox = buffered_bounding_box(intersection_bbox, haz_res)\n else:\n haz_bbox = intersection_bbox\n\n # Usually the intersection bbox is used for both exposure layer and result\n exp_bbox = imp_bbox = intersection_bbox\n\n return haz_bbox, exp_bbox, imp_bbox\n" }, { "alpha_fraction": 0.602703332901001, "alphanum_fraction": 0.6212084889411926, "avg_line_length": 29.46323585510254, "blob_id": "b45098617fd22c96e606b427c790735af95cba09", "content_id": "31993f5296684b64f2bf8e465c11acde45c6e8f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 12429, "license_type": "no_license", "max_line_length": 196, "num_lines": 408, "path": "/docs/usage/plugins/earthquake_plugins.rst", "repo_name": "AIFDR/riab", "src_encoding": "UTF-8", "text": "Earthquakes Plugins\n===================\n\nSimple Earthquake Damage\n------------------------\n\nThis example calculates earthquake damage\n\nPlugin code::\n\n\tfrom impact.plugins.core import FunctionProvider\n\tfrom impact.storage.raster import Raster\n\n\n\tclass EarthquakeFatalityFunction(FunctionProvider):\n\t \"\"\"Risk plugin for earthquake damage\n\n\t :author Allen\n\t :rating 1\n\t :param requires category=='hazard' and \\\n\t\t subcategory.startswith('earthquake') and \\\n\t\t layer_type=='raster'\n\t :param requires category=='exposure' and \\\n\t\t subcategory.startswith('population') and \\\n\t\t layer_type=='raster'\n\t \"\"\"\n\n\t @staticmethod\n\t def run(layers,\n\t\t a=0.97429, b=11.037):\n\t\t\"\"\"Risk plugin for earthquake fatalities\n\n\t\tInput\n\t\t layers: List of layers expected to contain\n\t\t H: Raster layer of MMI ground shaking\n\t\t P: Raster layer of population data on the same grid as H\n\t\t\"\"\"\n\n\t\t# Identify input layers\n\t\tintensity = layers[0]\n\t\tpopulation = layers[1]\n\n\t\t# Extract data\n\t\tH = intensity.get_data(nan=0)\n\t\tP = population.get_data(nan=0)\n\n\t\t# Calculate impact\n\t\tF = 10 ** (a * H - b) * P\n\n\t\t# Create new layer and return\n\t\tR = Raster(F,\n\t\t projection=population.get_projection(),\n\t\t geotransform=population.get_geotransform(),\n\t\t name='Estimated fatalities')\n\t\treturn R\n\n\nUSGS Fatality Function\n----------------------\n\nThe plugin is an implementation of the USGS Fatality Function guidelines\n\n.. 
warning:: This code has not been independantly verified as complying with the USGS guidelines\n\n\nPlugin code::\n\n\tfrom impact.plugins.core import FunctionProvider\n\tfrom impact.storage.raster import Raster\n\n\timport scipy\n\timport scipy.stats\n\timport numpy\n\n\n\tclass USGSFatalityFunction(FunctionProvider):\n\t \"\"\"Risk plugin for earthquake damage based on empirical results\n\n\t :author Hadi Ghasemi\n\t :rating 2\n\n\t :param requires category == 'hazard' and \\\n\t\t subcategory == 'earthquake' and \\\n\t\t unit == 'mmi' and \\\n\t\t layer_type == 'raster'\n\n\t :param requires category == 'exposure' and \\\n\t\t subcategory == 'population' and \\\n\t\t layer_type == 'raster'\n\t \"\"\"\n\n\t @staticmethod\n\t def run(layers,\n\t\t teta=14.05, beta=0.17, zeta=2.15):\n\t\t\"\"\"Risk plugin for earthquake fatalities\n\n\t\tInput\n\t\t H: Numerical array of hazard data\n\t\t E: Numerical array of exposure data\n\t\t\"\"\"\n\n\t\t# Identify input layers\n\t\tintensity = layers[0]\n\t\tpopulation = layers[1]\n\n\t\tprint\n\t\tprint '------------------'\n\t\tprint 'Got input layers'\n\t\tprint intensity\n\t\tprint population\n\n\t\tprint 'Population Resolution', population.get_geotransform()\n\n\t\t# Extract data\n\t\tH = intensity.get_data(nan=0) # Ground Shaking\n\t\tP = population.get_data(nan=0) # Population Density\n\n\t\t# Calculate population affected by each MMI level\n\t\tfor mmi in range(2, 10):\n\t\t mask = numpy.logical_and(mmi - 0.5 < H,\n\t\t H <= mmi + 0.5)\n\t\t I = numpy.where(mask, P, 0)\n\n\t\t # Generate text with result for this study\n\t\t number_of_people_affected = sum(I.flat)\n\n\t\t print ('Number of people affected by mmi '\n\t\t 'level %i: %.0f' % (mmi,\n\t\t number_of_people_affected / 1000))\n\n\t\t# Calculate impact\n\t\tlogHazard = 1 / beta * scipy.log(H / teta)\n\n\t\t# Convert array to be standard floats expected by cdf\n\t\tarrayout = numpy.array([[float(value) for value in row]\n\t\t for row in logHazard])\n\t\tF = scipy.stats.norm.cdf(arrayout * P)\n\n\t\t# Create new layer and return\n\t\tR = Raster(F,\n\t\t projection=population.get_projection(),\n\t\t geotransform=population.get_geotransform(),\n\t\t name='Estimated fatalities')\n\t\treturn R\n\n\n\nPost Earthquake Survey Plugin\n-----------------------------\n\nThis impact function estimates percentual damage to buildings as a\nfunction of ground shaking measured in MMI.\nBuildings are currently assumed to be represented in OpenStreetMap with\nattributes collected as during the July 2011 Indonesian mapping competition.\n\nThis impact function maps the OSM buildings into 2 classes:\nUnreinforced masonry (URM) and reinforced masonry (RM) according to\nthe guidelines.\n\nPlugin code::\n\n\t\"\"\"Impact function based on Padang 2009 post earthquake survey\n\n\tThis impact function estimates percentual damage to buildings as a\n\tfunction of ground shaking measured in MMI.\n\tBuildings are currently assumed to be represented in OpenStreetMap with\n\tattributes collected as during the July 2011 Indonesian mapping competition.\n\n\tThis impact function maps the OSM buildings into 2 classes:\n\tUnreinforced masonry (URM) and reinforced masonry (RM) according to\n\tthe guidelines.\n\t\"\"\"\n\n\tfrom django.template.loader import render_to_string\n\tfrom impact.plugins.core import FunctionProvider\n\tfrom impact.storage.vector import Vector\n\tfrom django.utils.translation import ugettext as _\n\tfrom impact.plugins.utilities import PointZoomSize\n\tfrom impact.plugins.utilities import 
PointClassColor\n\tfrom impact.plugins.utilities import PointSymbol\n\tfrom impact.plugins.mappings import osm2bnpb\n\n\t# Damage 'curves' for the two vulnerability classes\n\tdamage_parameters = {'URM': [6, 7],\n\t\t 'RM': [6, 8]}\n\n\n\tclass EarthquakeGuidelinesFunction(FunctionProvider):\n\t \"\"\"Risk plugin for BNPB guidelines for earthquake damage to buildings\n\n\t :param requires category=='hazard' and \\\n\t\t subcategory.startswith('earthquake') and \\\n\t\t layer_type=='raster'\n\t :param requires category=='exposure' and \\\n\t\t subcategory.startswith('building') and \\\n\t\t layer_type=='vector'\n\t \"\"\"\n\n\t # FIXME (Ole): Something like this too\n\t # and \\\n\t # datatype=='osm'\n\n\t vclass_tag = 'VCLASS'\n\t target_field = 'DMGLEVEL'\n\n\t def run(self, layers):\n\t\t\"\"\"Risk plugin for earthquake school damage\n\t\t\"\"\"\n\n\t\t# Extract data\n\t\tH = layers[0] # Ground shaking\n\t\tE = layers[1] # Building locations\n\n\t\t# Map from OSM attributes to the guideline classes (URM and RM)\n\t\t# FIXME (Ole): Not very robust way of deciding\n\t\t# Need keyword identifier for each kind of building dataset.\n\t\tif E.get_name().lower().startswith('osm'):\n\t\t # Map from OSM attributes to the padang building classes\n\t\t E = osm2bnpb(E, target_attribute=self.vclass_tag)\n\n\t\t# Interpolate hazard level to building locations\n\t\tH = H.interpolate(E)\n\n\t\t# Extract relevant numerical data\n\t\tcoordinates = E.get_geometry()\n\t\tshaking = H.get_data()\n\t\tN = len(shaking)\n\n\t\t# List attributes to carry forward to result layer\n\t\tattributes = E.get_attribute_names()\n\n\t\t# Calculate building damage\n\t\tcount3 = 0\n\t\tcount2 = 0\n\t\tcount1 = 0\n\t\tbuilding_damage = []\n\t\tfor i in range(N):\n\t\t mmi = float(shaking[i].values()[0])\n\n\t\t building_class = E.get_data(self.vclass_tag, i)\n\t\t lo, hi = damage_parameters[building_class]\n\n\t\t if mmi < lo:\n\t\t damage = 1 # Low\n\t\t count1 += 1\n\t\t elif lo <= mmi < hi:\n\t\t damage = 2 # Medium\n\t\t count2 += 1\n\t\t else:\n\t\t damage = 3 # High\n\t\t count3 += 1\n\n\t\t # Collect shake level and calculated damage\n\t\t result_dict = {self.target_field: damage,\n\t\t 'MMI': mmi}\n\n\t\t # Carry all orginal attributes forward\n\t\t for key in attributes:\n\t\t result_dict[key] = E.get_data(key, i)\n\n\t\t # Record result for this feature\n\t\t building_damage.append(result_dict)\n\n\t\t# Create report\n\t\tcaption = ('<table border=\"0\" width=\"320px\">'\n\t\t ' <tr><th><b>%s</b></th><th><b>%s</b></th></th>'\n\t\t ' <tr></tr>'\n\t\t ' <tr><td>%s&#58;</td><td>%i</td></tr>'\n\t\t ' <tr><td>%s (10-25%%)&#58;</td><td>%i</td></tr>'\n\t\t ' <tr><td>%s (25-50%%)&#58;</td><td>%i</td></tr>'\n\t\t ' <tr><td>%s (50-100%%)&#58;</td><td>%i</td></tr>'\n\t\t '</table>' % (_('Buildings'), _('Total'),\n\t\t _('All'), N,\n\t\t _('Low damage'), count1,\n\t\t _('Medium damage'), count2,\n\t\t _('High damage'), count3))\n\n\t\t# Create vector layer and return\n\t\tV = Vector(data=building_damage,\n\t\t projection=E.get_projection(),\n\t\t geometry=coordinates,\n\t\t name='Estimated damage level',\n\t\t keywords={'caption': caption})\n\t\treturn V\n\n\t def generate_style(self, data):\n\t\t\"\"\"Generates a polygon SLD file based on the data values\n\t\t\"\"\"\n\n\t\t# FIXME (Ole): Return static style to start with: ticket #144\n\t\tstyle = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<sld:StyledLayerDescriptor xmlns=\"http://www.opengis.net/sld\" xmlns:sld=\"http://www.opengis.net/sld\" 
xmlns:ogc=\"http://www.opengis.net/ogc\" xmlns:gml=\"http://www.opengis.net/gml\" version=\"1.0.0\">\n\t <sld:NamedLayer>\n\t <sld:Name>earthquake_impact</sld:Name>\n\t <sld:UserStyle>\n\t <sld:Name>earthquake_impact</sld:Name>\n\t <sld:Title/>\n\t <sld:FeatureTypeStyle>\n\t\t<sld:Name>name</sld:Name>\n\t\t<sld:Rule>\n\t\t <sld:Name>1</sld:Name>\n\t\t <sld:Title>Low</sld:Title>\n\t\t <ogc:Filter>\n\t\t <ogc:PropertyIsLessThan>\n\t\t <ogc:PropertyName>DMGLEVEL</ogc:PropertyName>\n\t\t <ogc:Literal>1.5</ogc:Literal>\n\t\t </ogc:PropertyIsLessThan>\n\t\t </ogc:Filter>\n\t\t <sld:PolygonSymbolizer>\n\t\t <sld:Fill>\n\t\t <sld:CssParameter name=\"fill\">#1EFC7C</sld:CssParameter>\n\t\t </sld:Fill>\n\t\t <sld:Stroke>\n\t\t <sld:CssParameter name=\"stroke\">#0EEC6C</sld:CssParameter>\n\t\t </sld:Stroke>\n\t\t </sld:PolygonSymbolizer>\n\t\t</sld:Rule>\n\t\t<sld:Rule>\n\t\t <sld:Name>2</sld:Name>\n\t\t <sld:Title>Medium</sld:Title>\n\t\t <ogc:Filter>\n\t\t <ogc:And>\n\t\t <ogc:PropertyIsGreaterThanOrEqualTo>\n\t\t <ogc:PropertyName>DMGLEVEL</ogc:PropertyName>\n\t\t <ogc:Literal>1.5</ogc:Literal>\n\t\t </ogc:PropertyIsGreaterThanOrEqualTo>\n\t\t <ogc:PropertyIsLessThan>\n\t\t <ogc:PropertyName>DMGLEVEL</ogc:PropertyName>\n\t\t <ogc:Literal>2.5</ogc:Literal>\n\t\t </ogc:PropertyIsLessThan>\n\t\t </ogc:And>\n\t\t </ogc:Filter>\n\t\t <sld:PolygonSymbolizer>\n\t\t <sld:Fill>\n\t\t <sld:CssParameter name=\"fill\">#FD8D3C</sld:CssParameter>\n\t\t </sld:Fill>\n\t\t <sld:Stroke>\n\t\t <sld:CssParameter name=\"stroke\">#ED7D2C</sld:CssParameter>\n\t\t </sld:Stroke>\n\t\t </sld:PolygonSymbolizer>\n\t\t</sld:Rule>\n\t\t<sld:Rule>\n\t\t <sld:Name>3</sld:Name>\n\t\t <sld:Title>High</sld:Title>\n\t\t <ogc:Filter>\n\t\t <ogc:PropertyIsGreaterThanOrEqualTo>\n\t\t <ogc:PropertyName>DMGLEVEL</ogc:PropertyName>\n\t\t <ogc:Literal>2.5</ogc:Literal>\n\t\t </ogc:PropertyIsGreaterThanOrEqualTo>\n\t\t </ogc:Filter>\n\t\t <sld:PolygonSymbolizer>\n\t\t <sld:Fill>\n\t\t <sld:CssParameter name=\"fill\">#F31A1C</sld:CssParameter>\n\t\t </sld:Fill>\n\t\t <sld:Stroke>\n\t\t <sld:CssParameter name=\"stroke\">#E30A0C</sld:CssParameter>\n\t\t </sld:Stroke>\n\t\t </sld:PolygonSymbolizer>\n\t\t</sld:Rule>\n\t </sld:FeatureTypeStyle>\n\t </sld:UserStyle>\n\t </sld:NamedLayer>\n\t</sld:StyledLayerDescriptor>\n\t\"\"\"\n\n\t\treturn style\n\n\t def Xgenerate_style(self, data):\n\t\t\"\"\"Generates a point SLD file based on the data values\n\t\t\"\"\"\n\n\t\t# Define default behaviour to be used when\n\t\t# - symbol attribute is missing\n\t\t# - attribute value is None or ''\n\t\tDEFAULT_SYMBOL = 'circle'\n\n\t\tsymbol_field = None\n\n\t\t# FIXME: Replace these by dict and extend below\n\t\tsymbol_keys = [None, '']\n\t\tsymbol_values = [DEFAULT_SYMBOL, DEFAULT_SYMBOL]\n\n\t\t# Predefined scales and corresponding font sizes\n\t\tscale_keys = [10000000000, 10000000, 5000000,\n\t\t 1000000, 500000, 250000, 100000]\n\t\tscale_values = [3, 5, 8, 12, 14, 16, 18]\n\n\t\t# Predefined colour classes\n\t\tclass_keys = [_('Low damage'), _('Medium damage'), _('High damage')]\n\t\tclass_values = [{'min': 0.5, 'max': 1.5,\n\t\t 'color': '#0efc7c', 'opacity': '1'},\n\t\t {'min': 1.5, 'max': 2.5,\n\t\t 'color': '#fded0c', 'opacity': '1'},\n\t\t {'min': 2.5, 'max': 3.5,\n\t\t 'color': '#e31a1c', 'opacity': '1'}]\n\n\t\tsymbols = {None: DEFAULT_SYMBOL, '': DEFAULT_SYMBOL}\n\n\t\t# Generate sld style file\n\t\tparams = dict(name=data.get_name(),\n\t\t damage_field=self.target_field,\n\t\t symbol_field=symbol_field,\n\t\t symbols=symbols,\n\t\t 
scales=dict(zip(scale_keys, scale_values)),\n\t\t classifications=dict(zip(class_keys, class_values)))\n\n\t\t# The styles are in $RIAB_HOME/riab/impact/templates/impact/styles\n\t\treturn render_to_string('impact/styles/point_classes.sld', params)\n" } ]
75
tobiasrausch/bamStats
https://github.com/tobiasrausch/bamStats
e620f6beff71d654ed35b4cd89810e08bafaa40d
2bec03143aebfcefedda4a86602b04754d6cd5b8
f98ca807b65aba7ccaf8703bcee1e47f119dbe87
refs/heads/master
2021-05-23T23:09:58.752345
2021-05-08T12:31:48
2021-05-08T12:31:48
52,509,133
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6164622902870178, "alphanum_fraction": 0.6279717087745667, "avg_line_length": 39.84778594970703, "blob_id": "f4e7c42aa219fd1a68097e4ea1c5d2ba13fa33b8", "content_id": "e7457e6112f50762dc7579e527b5b65a51b07d3d", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 21200, "license_type": "permissive", "max_line_length": 297, "num_lines": 519, "path": "/src/count_junction.h", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#ifndef COUNT_JUNCTION_H\n#define COUNT_JUNCTION_H\n\n#include <limits>\n\n#include <boost/icl/split_interval_map.hpp>\n#include <boost/dynamic_bitset.hpp>\n#include <boost/unordered_map.hpp>\n#include <boost/date_time/posix_time/posix_time.hpp>\n#include <boost/date_time/gregorian/gregorian.hpp>\n#include <boost/progress.hpp>\n\n#include <htslib/sam.h>\n#include <htslib/faidx.h>\n\n#include \"version.h\"\n#include \"util.h\"\n#include \"gtf.h\"\n#include \"gff3.h\"\n#include \"bed.h\"\n\n\nnamespace bamstats\n{\n\n struct SpGp {\n int32_t sp;\n int32_t idx;\n int32_t gp;\n\n SpGp(int32_t s, int32_t i, int32_t g) : sp(s), idx(i), gp(g) {}\n\n inline bool operator==(SpGp const& other) const {\n return ((sp == other.sp) && (idx == other.idx) && (gp == other.gp));\n }\n\n inline bool operator<(SpGp const& other) const {\n return (sp < other.sp);\n }\n };\n \n struct CountJunctionConfig {\n typedef std::map<std::string, int32_t> TChrMap;\n\n bool novelJct;\n uint8_t inputFileFormat; // 0 = gtf, 1 = bed, 2 = gff3\n uint16_t minQual;\n uint16_t stranded; // 0 = unstranded, 1 = stranded, 2 = stranded (opposite)\n TChrMap nchr;\n std::string sampleName;\n std::string idname;\n std::string feature;\n boost::filesystem::path gtfFile;\n boost::filesystem::path bedFile;\n boost::filesystem::path bamFile;\n boost::filesystem::path outintra;\n boost::filesystem::path outinter;\n boost::filesystem::path outnovel;\n };\n\n\n template<typename TConfig, typename TGenomicRegions, typename TGenomicExonJunction>\n inline int32_t\n countExonJct(TConfig const& c, TGenomicRegions& gRegions, TGenomicExonJunction& ejct, TGenomicExonJunction& njct) {\n typedef typename TGenomicRegions::value_type TChromosomeRegions;\n typedef typename TGenomicExonJunction::value_type TExonJctMap;\n \n // Load bam file\n samFile* samfile = sam_open(c.bamFile.string().c_str(), \"r\");\n hts_idx_t* idx = sam_index_load(samfile, c.bamFile.string().c_str());\n bam_hdr_t* hdr = sam_hdr_read(samfile);\n\n // Parse BAM file\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \" << \"BAM file parsing\" << std::endl;\n boost::progress_display show_progress( hdr->n_targets );\n\n // Iterate chromosomes\n typedef std::set<SpGp> TSpGpSet;\n typedef boost::unordered_map<std::size_t, TSpGpSet> TClipReads;\n TClipReads clipReads;\n uint32_t minClipLength = 25;\n for(int32_t refIndex=0; refIndex < (int32_t) hdr->n_targets; ++refIndex) {\n ++show_progress;\n if (gRegions[refIndex].empty()) continue;\n\n // Sort by position\n std::sort(gRegions[refIndex].begin(), gRegions[refIndex].end(), SortIntervalStart<IntervalLabelId>());\n int32_t maxExonLength = 0;\n for(uint32_t i = 0; i < gRegions[refIndex].size(); ++i) {\n\tif ((gRegions[refIndex][i].end - gRegions[refIndex][i].start) > maxExonLength) {\n\t maxExonLength = gRegions[refIndex][i].end - gRegions[refIndex][i].start;\n\t}\n }\n\n // Flag junction positions\n typedef 
boost::dynamic_bitset<> TBitSet;\n TBitSet featureBitMap(hdr->target_len[refIndex]);\n for(uint32_t i = 0; i < gRegions[refIndex].size(); ++i) {\n\tfeatureBitMap[gRegions[refIndex][i].start] = 1;\n\tfeatureBitMap[gRegions[refIndex][i].end] = 1;\n }\n\n // Count reads\n hts_itr_t* iter = sam_itr_queryi(idx, refIndex, 0, hdr->target_len[refIndex]);\n bam1_t* rec = bam_init1();\n while (sam_itr_next(samfile, iter, rec) >= 0) {\n\tif (rec->core.flag & (BAM_FQCFAIL | BAM_FDUP | BAM_FUNMAP)) continue;\n\tif (rec->core.qual < c.minQual) continue; // Low quality read\n\n\t// Get read sequence\n\tstd::string sequence;\n\tsequence.resize(rec->core.l_qseq);\n\tuint8_t* seqptr = bam_get_seq(rec);\n\tfor (int32_t i = 0; i < rec->core.l_qseq; ++i) sequence[i] = \"=ACMGRSVTWYHKDBN\"[bam_seqi(seqptr, i)];\n\n\t// Collect all exons this read spans\n\tTSpGpSet spgpset;\n\t\n\t// Parse CIGAR\n\tuint32_t* cigar = bam_get_cigar(rec);\n\tint32_t gp = rec->core.pos; // Genomic position\n\tint32_t sp = 0; // Sequence position\n\tfor (std::size_t i = 0; i < rec->core.n_cigar; ++i) {\n\t if (bam_cigar_op(cigar[i]) == BAM_CSOFT_CLIP) {\n\t sp += bam_cigar_oplen(cigar[i]);\n\t if (bam_cigar_oplen(cigar[i]) >= minClipLength) {\n\t if (featureBitMap[gp]) spgpset.insert(SpGp(sp, refIndex, gp));\n\t }\n\t }\n\t else if (bam_cigar_op(cigar[i]) == BAM_CINS) sp += bam_cigar_oplen(cigar[i]);\n\t else if (bam_cigar_op(cigar[i]) == BAM_CDEL) gp += bam_cigar_oplen(cigar[i]);\n\t else if (bam_cigar_op(cigar[i]) == BAM_CREF_SKIP) {\n\t int32_t gpStart = gp;\n\t gp += bam_cigar_oplen(cigar[i]);\n\t int32_t gpEnd = gp;\n\t if ((featureBitMap[gpStart]) && (featureBitMap[gpEnd])) {\n\t typename TChromosomeRegions::const_iterator vIt = std::lower_bound(gRegions[refIndex].begin(), gRegions[refIndex].end(), IntervalLabelId(std::max(0, gpStart - maxExonLength)), SortIntervalStart<IntervalLabelId>());\n\t for(; vIt != gRegions[refIndex].end(); ++vIt) {\n\t\tif (vIt->end < gpStart) continue;\n\t\tif (vIt->start > gpStart) break; // Sorted intervals so we can stop searching\n\t\tif (vIt->end == gpStart) {\n\t\t if (!_strandOkay(rec, vIt->strand, c.stranded)) continue; // Check strand\n\t\t // Find junction partner\n\t\t typename TChromosomeRegions::const_iterator vItNext = vIt;\n\t\t ++vItNext;\n\t\t for(; vItNext != gRegions[refIndex].end(); ++vItNext) {\n\t\t if (vItNext->end < gpEnd) continue;\n\t\t if (vItNext->start > gpEnd) break; // Sorted intervals so we can stop searching\n\t\t if (vItNext->start == gpEnd) {\n\t\t if (!_strandOkay(rec, vItNext->strand, c.stranded)) continue; // Check strand\n\t\t // Count Exon-Exon Junction\n\t\t if (vIt->eid < vItNext->eid) {\n\t\t\tint32_t e1 = vIt->eid;\n\t\t\tint32_t e2 = vItNext->eid;\n\t\t\tif (e2 < e1) {\n\t\t\t e1 = vItNext->eid;\n\t\t\t e2 = vIt->eid;\n\t\t\t}\n\t\t\ttypename TExonJctMap::iterator itEjct = ejct[refIndex].find(std::make_pair(e1, e2));\n\t\t\tif (itEjct != ejct[refIndex].end()) ++itEjct->second;\n\t\t\telse ejct[refIndex].insert(std::make_pair(std::make_pair(e1, e2), 1));\n\t\t }\n\t\t }\n\t\t }\n\t\t}\n\t }\n\t } else {\n\t if (c.novelJct) {\n\t\ttypename TExonJctMap::iterator itNjct = njct[refIndex].find(std::make_pair(gpStart, gpEnd));\n\t\tif (itNjct != njct[refIndex].end()) ++itNjct->second;\n\t\telse njct[refIndex].insert(std::make_pair(std::make_pair(gpStart, gpEnd), 1));\n\t }\n\t }\n\t }\n\t else if (bam_cigar_op(cigar[i]) == BAM_CHARD_CLIP) {\n\t if (bam_cigar_oplen(cigar[i]) >= minClipLength) {\n\t if (featureBitMap[gp]) spgpset.insert(SpGp(sp, refIndex, gp));\n\t 
}\n\t } else if ((bam_cigar_op(cigar[i]) == BAM_CMATCH) || (bam_cigar_op(cigar[i]) == BAM_CEQUAL) || (bam_cigar_op(cigar[i]) == BAM_CDIFF)) {\n\t sp += bam_cigar_oplen(cigar[i]);\n\t gp += bam_cigar_oplen(cigar[i]);\n\t } else {\n\t std::cerr << \"Unknown Cigar options\" << std::endl;\n\t return 1;\n\t }\n\t}\n\n\t// Read might have secondary alignments so append\n\tif (!spgpset.empty()) {\n\t std::size_t hr = hash_read(rec);\n\t if (clipReads.find(hr) == clipReads.end()) clipReads[hr] = spgpset;\n\t else clipReads[hr].insert(spgpset.begin(), spgpset.end());\n\t}\n }\n // Clean-up\n bam_destroy1(rec);\n hts_itr_destroy(iter);\n }\n\n // Post-process the soft-clipped reads\n for(typename TClipReads::const_iterator itC = clipReads.begin(); itC != clipReads.end(); ++itC) {\n for(typename TSpGpSet::const_iterator itSp = itC->second.begin(); itSp != itC->second.end(); ++itSp) {\n\t//std::cerr << itSp->sp << '\\t' << itSp->idx << '\\t' << itSp->gp << std::endl;\n }\n //std::cerr << std::endl;\n }\n\n // clean-up\n bam_hdr_destroy(hdr);\n hts_idx_destroy(idx);\n sam_close(samfile);\n return 0;\n }\n \n template<typename TConfig>\n inline int32_t\n countJunctionRun(TConfig const& c) {\n\n#ifdef PROFILE\n ProfilerStart(\"alfred.prof\");\n#endif\n\n // Parse GTF file\n typedef std::vector<IntervalLabelId> TChromosomeRegions;\n typedef std::vector<TChromosomeRegions> TGenomicRegions;\n TGenomicRegions gRegions(c.nchr.size(), TChromosomeRegions());\n typedef std::vector<std::string> TGeneIds;\n TGeneIds geneIds;\n int32_t tf = 0;\n if (c.inputFileFormat == 0) tf = parseGTFAll(c, gRegions, geneIds);\n else if (c.inputFileFormat == 1) tf = parseBEDAll(c, gRegions, geneIds);\n else if (c.inputFileFormat == 2) tf = parseGFF3All(c, gRegions, geneIds);\n if (tf == 0) {\n std::cerr << \"Error parsing GTF/GFF3/BED file!\" << std::endl;\n return 1;\n }\n\n // Exon junction counting\n typedef std::pair<int32_t, int32_t> TExonPair;\n typedef std::map<TExonPair, uint32_t> TExonJctCount;\n typedef std::vector<TExonJctCount> TGenomicExonJctCount;\n TGenomicExonJctCount ejct(c.nchr.size(), TExonJctCount());\n TGenomicExonJctCount njct(c.nchr.size(), TExonJctCount());\n int32_t retparse = countExonJct(c, gRegions, ejct, njct);\n if (retparse != 0) {\n std::cerr << \"Error exon junction counting!\" << std::endl;\n return 1;\n }\n\n // Mapping refIndex -> chromosome name\n typedef std::vector<std::string> TChrName;\n TChrName chrName(c.nchr.size());\n for(int32_t refIndex=0; refIndex < (int32_t) c.nchr.size(); ++refIndex) {\n chrName[refIndex] = \"NA\";\n for(typename CountJunctionConfig::TChrMap::const_iterator itC = c.nchr.begin(); itC != c.nchr.end(); ++itC) {\n\tif (itC->second == refIndex) {\n\t chrName[refIndex] = itC->first;\n\t break;\n\t}\n }\n }\n \n // Intra-gene table\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \" << \"Output intra-gene splicing table\" << std::endl;\n boost::progress_display show_progress( c.nchr.size() );\n std::ofstream intrafile(c.outintra.string().c_str());\n intrafile << \"gene\\texonA\\texonB\\t\" << c.sampleName << std::endl;\n for(int32_t refIndex=0; refIndex < (int32_t) c.nchr.size(); ++refIndex) {\n ++show_progress;\n if (gRegions[refIndex].empty()) continue;\n\n // Output intra-gene exon-exon junction support\n for(typename TChromosomeRegions::iterator itR = gRegions[refIndex].begin(); itR != gRegions[refIndex].end(); ++itR) {\n\ttypename TChromosomeRegions::iterator itRNext 
= itR;\n\t++itRNext;\n\tfor(; itRNext != gRegions[refIndex].end(); ++itRNext) {\n\t if ((itR->lid == itRNext->lid) && (itR->end < itRNext->start)) {\n\t intrafile << geneIds[itR->lid] << '\\t' << chrName[refIndex] << ':' << itR->start << '-' << itR->end << '\\t' << chrName[refIndex] << ':' << itRNext->start << '-' << itRNext->end << '\\t';\n\t int32_t leid = itR->eid;\n\t int32_t heid = itRNext->eid;\n\t if (leid > heid) {\n\t leid = itRNext->eid;\n\t heid = itR->eid;\n\t }\n\t typename TExonJctCount::iterator itE = ejct[refIndex].find(std::make_pair(leid, heid));\n\t if (itE != ejct[refIndex].end()) intrafile << itE->second << std::endl;\n\t else intrafile << '0' << std::endl;\n\t }\n\t}\n }\n }\n intrafile.close();\n\n // Mapping exon id to gene id\n typedef std::vector<int32_t> TEidToLid;\n typedef std::vector< std::pair<int32_t, int32_t> > TEidToCoord;\n typedef std::vector<int32_t> TLidToRefIndex;\n TEidToLid etol;\n TEidToCoord ecoord;\n TLidToRefIndex lidToRefIndex(geneIds.size());\n for(int32_t refIndex=0; refIndex < (int32_t) c.nchr.size(); ++refIndex) {\n for (typename TChromosomeRegions::const_iterator itG = gRegions[refIndex].begin(); itG != gRegions[refIndex].end(); ++itG) {\n\tif (itG->eid >= (int32_t) etol.size()) {\n\t etol.resize(itG->eid + 1);\n\t ecoord.resize(itG->eid + 1);\n\t}\n\tetol[itG->eid] = itG->lid;\n\tecoord[itG->eid] = std::make_pair(itG->start, itG->end);\n\tlidToRefIndex[itG->lid] = refIndex;\n }\n }\n\n // Inter-gene table\n now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \" << \"Output inter-gene splicing table\" << std::endl;\n boost::progress_display spr( c.nchr.size() );\n std::ofstream interfile(c.outinter.string().c_str());\n interfile << \"geneA\\texonA\\tgeneB\\texonB\\t\" << c.sampleName << std::endl;\n for(int32_t refIndex=0; refIndex < (int32_t) c.nchr.size(); ++refIndex) {\n ++spr;\n for(typename TExonJctCount::const_iterator itE = ejct[refIndex].begin(); itE != ejct[refIndex].end(); ++itE) {\n\tint32_t e1 = itE->first.first;\n\tint32_t e2 = itE->first.second;\n\t// Different Genes?\n\tif (etol[e1] != etol[e2]) {\n\t interfile << geneIds[etol[e1]] << '\\t' << chrName[lidToRefIndex[etol[e1]]] << ':' << ecoord[e1].first << '-' << ecoord[e1].second << '\\t' << geneIds[etol[e2]] << '\\t' << chrName[lidToRefIndex[etol[e2]]] << ':' << ecoord[e2].first << '-' << ecoord[e2].second << '\\t' << itE->second << std::endl;\n\t}\n }\n }\n interfile.close();\n\n if (c.novelJct) {\n // Start and end of genes (independent of chromosome, needs to be checked afterwards!)\n typedef std::vector<IntervalLabel> TGeneRegions;\n TGeneRegions geneReg(geneIds.size(), IntervalLabel(0));\n int32_t maxGeneLength = 0;\n for(int32_t refIndex=0; refIndex < (int32_t) c.nchr.size(); ++refIndex) {\n\tfor (typename TChromosomeRegions::const_iterator itG = gRegions[refIndex].begin(); itG != gRegions[refIndex].end(); ++itG) {\n\t if (geneReg[itG->lid].lid == -1) {\n\t geneReg[itG->lid].start = itG->start;\n\t geneReg[itG->lid].end = itG->end;\n\t geneReg[itG->lid].strand = itG->strand;\n\t geneReg[itG->lid].lid = itG->lid;\n\t } else {\n\t if (itG->start < geneReg[itG->lid].start) geneReg[itG->lid].start = itG->start;\n\t if (itG->end > geneReg[itG->lid].end) geneReg[itG->lid].end = itG->end;\n\t }\n\t if ((geneReg[itG->lid].end - geneReg[itG->lid].start) > maxGeneLength) maxGeneLength = (geneReg[itG->lid].end - geneReg[itG->lid].start);\n\t}\n }\n\n // Sort by start position\n std::sort(geneReg.begin(), 
geneReg.end(), SortIntervalStart<IntervalLabel>());\n\n // Novel intra-chromosomal splice junctions\n now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \" << \"Output novel splicing table\" << std::endl;\n boost::progress_display sprgr( c.nchr.size() );\n std::ofstream novelfile(c.outnovel.string().c_str());\n novelfile << \"geneA\\tpositionA\\tgeneB\\tpositionB\\t\" << c.sampleName << std::endl;\n for(int32_t refIndex=0; refIndex < (int32_t) c.nchr.size(); ++refIndex) {\n\t++sprgr;\n\tfor(typename TExonJctCount::const_iterator itN = njct[refIndex].begin(); itN != njct[refIndex].end(); ++itN) {\n\t int32_t p1 = itN->first.first;\n\t std::string geneA = \"NA\";\n\t typename TGeneRegions::const_iterator gIt1 = std::lower_bound(geneReg.begin(), geneReg.end(), IntervalLabel(std::max(0, p1 - maxGeneLength)), SortIntervalStart<IntervalLabel>());\n\t for(; gIt1 != geneReg.end(); ++gIt1) {\n\t if (gIt1->end < p1) continue;\n\t if (gIt1->start > p1) break; // Sorted intervals so we can stop searching\n\t if (lidToRefIndex[gIt1->lid] == refIndex) {\n\t geneA = geneIds[gIt1->lid];\n\t break;\n\t }\n\t }\n\t int32_t p2 = itN->first.second;\n\t std::string geneB = \"NA\";\n\t typename TGeneRegions::const_iterator gIt2 = std::lower_bound(geneReg.begin(), geneReg.end(), IntervalLabel(std::max(0, p2 - maxGeneLength)), SortIntervalStart<IntervalLabel>());\n\t for(; gIt2 != geneReg.end(); ++gIt2) {\n\t if (gIt2->end < p2) continue;\n\t if (gIt2->start > p2) break; // Sorted intervals so we can stop searching\n\t if (lidToRefIndex[gIt2->lid] == refIndex) {\n\t geneB = geneIds[gIt2->lid];\n\t break;\n\t }\n\t }\n\t novelfile << geneA << '\\t' << chrName[refIndex] << ':' << p1 << '\\t' << geneB << '\\t' << chrName[refIndex] << ':' << p2 << '\\t' << itN->second << std::endl;\n\t}\n }\n novelfile.close();\n }\n \n // Done\n now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] Done.\" << std::endl;\n \n#ifdef PROFILE\n ProfilerStop();\n#endif\n\n return 0;\n }\n\n\n int count_junction(int argc, char **argv) {\n CountJunctionConfig c;\n\n // Parameter\n boost::program_options::options_description generic(\"Generic options\");\n generic.add_options()\n (\"help,?\", \"show help message\")\n (\"map-qual,m\", boost::program_options::value<uint16_t>(&c.minQual)->default_value(10), \"min. 
mapping quality\")\n (\"stranded,s\", boost::program_options::value<uint16_t>(&c.stranded)->default_value(0), \"strand-specific counting (0: unstranded, 1: stranded, 2: reverse stranded)\")\n (\"outintra,o\", boost::program_options::value<boost::filesystem::path>(&c.outintra)->default_value(\"intra.tsv\"), \"intra-gene exon-exon junction reads\")\n (\"outinter,p\", boost::program_options::value<boost::filesystem::path>(&c.outinter)->default_value(\"inter.tsv\"), \"inter-gene exon-exon junction reads\")\n (\"outnovel,n\", boost::program_options::value<boost::filesystem::path>(&c.outnovel), \"output file for not annotated intra-chromosomal junction reads\")\n ;\n\n boost::program_options::options_description gtfopt(\"GTF/GFF3 input file options\");\n gtfopt.add_options()\n (\"gtf,g\", boost::program_options::value<boost::filesystem::path>(&c.gtfFile), \"gtf/gff3 file\")\n (\"id,i\", boost::program_options::value<std::string>(&c.idname)->default_value(\"gene_id\"), \"gtf/gff3 attribute\")\n (\"feature,f\", boost::program_options::value<std::string>(&c.feature)->default_value(\"exon\"), \"gtf/gff3 feature\")\n ;\n\n boost::program_options::options_description bedopt(\"BED input file options, columns chr, start, end, name [, score, strand]\");\n bedopt.add_options()\n (\"bed,b\", boost::program_options::value<boost::filesystem::path>(&c.bedFile), \"bed file\")\n ;\n \n boost::program_options::options_description hidden(\"Hidden options\");\n hidden.add_options()\n (\"input-file\", boost::program_options::value<boost::filesystem::path>(&c.bamFile), \"input bam file\")\n ;\n\n boost::program_options::positional_options_description pos_args;\n pos_args.add(\"input-file\", -1);\n\n boost::program_options::options_description cmdline_options;\n cmdline_options.add(generic).add(gtfopt).add(bedopt).add(hidden);\n boost::program_options::options_description visible_options;\n visible_options.add(generic).add(gtfopt).add(bedopt);\n\n // Parse command-line\n boost::program_options::variables_map vm;\n boost::program_options::store(boost::program_options::command_line_parser(argc, argv).options(cmdline_options).positional(pos_args).run(), vm);\n boost::program_options::notify(vm);\n\n // Check command line arguments\n if ((vm.count(\"help\")) || (!vm.count(\"input-file\")) || ((!vm.count(\"gtf\")) && (!vm.count(\"bed\")))) {\n std::cout << std::endl;\n std::cout << \"Usage: alfred \" << argv[0] << \" [OPTIONS] -g <hg19.gtf.gz> <aligned.bam>\" << std::endl;\n std::cout << visible_options << \"\\n\";\n return 1;\n }\n\n // Novel junctions\n if (vm.count(\"outnovel\")) c.novelJct = true;\n else c.novelJct = false;\n\n // Check bam file\n if (!(boost::filesystem::exists(c.bamFile) && boost::filesystem::is_regular_file(c.bamFile) && boost::filesystem::file_size(c.bamFile))) {\n std::cerr << \"Alignment file is missing: \" << c.bamFile.string() << std::endl;\n return 1;\n } else {\n samFile* samfile = sam_open(c.bamFile.string().c_str(), \"r\");\n if (samfile == NULL) {\n\tstd::cerr << \"Fail to open file \" << c.bamFile.string() << std::endl;\n\treturn 1;\n }\n hts_idx_t* idx = sam_index_load(samfile, c.bamFile.string().c_str());\n if (idx == NULL) {\n\tif (bam_index_build(c.bamFile.string().c_str(), 0) != 0) {\n\t std::cerr << \"Fail to open index for \" << c.bamFile.string() << std::endl;\n\t return 1;\n\t}\n }\n bam_hdr_t* hdr = sam_hdr_read(samfile);\n for(int32_t refIndex=0; refIndex < hdr->n_targets; ++refIndex) c.nchr.insert(std::make_pair(hdr->target_name[refIndex], refIndex));\n \n\t// Get sample 
name\n std::string sampleName;\n if (!getSMTag(std::string(hdr->text), c.bamFile.stem().string(), sampleName)) {\n\tstd::cerr << \"Only one sample (@RG:SM) is allowed per input BAM file \" << c.bamFile.string() << std::endl;\n\treturn 1;\n } else c.sampleName = sampleName;\n bam_hdr_destroy(hdr);\n hts_idx_destroy(idx);\n sam_close(samfile);\n }\n\n // Check region file\n if (!(boost::filesystem::exists(c.gtfFile) && boost::filesystem::is_regular_file(c.gtfFile) && boost::filesystem::file_size(c.gtfFile))) {\n if (!(boost::filesystem::exists(c.bedFile) && boost::filesystem::is_regular_file(c.bedFile) && boost::filesystem::file_size(c.bedFile))) {\n\tstd::cerr << \"Input gtf/bed file is missing.\" << std::endl;\n\treturn 1;\n } else c.inputFileFormat = 1;\n } else {\n if (is_gff3(c.gtfFile)) c.inputFileFormat = 2;\n else c.inputFileFormat = 0;\n }\n\n // Show cmd\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \";\n std::cout << \"alfred \";\n for(int i=0; i<argc; ++i) { std::cout << argv[i] << ' '; }\n std::cout << std::endl;\n\n return countJunctionRun(c);\n }\n \n\n\n \n}\n\n#endif\n" }, { "alpha_fraction": 0.6730552315711975, "alphanum_fraction": 0.6899662017822266, "avg_line_length": 31.851852416992188, "blob_id": "36b26f5a32d4880e4d1bfe247df9b71cfab62069", "content_id": "2403b61e4b5214432b068c270db9bfdf2424a403", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 887, "license_type": "permissive", "max_line_length": 197, "num_lines": 27, "path": "/test/Makefile", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "SHELL := /bin/bash\n\n# Targets\nTARGETS = .conda .channels .alfred .test\nPBASE=$(shell pwd)\n\nall: \t$(TARGETS)\n\n.conda:\n\twget 'https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh' && bash Miniconda3-latest-Linux-x86_64.sh -b -p ${PBASE}/bin && rm -f Miniconda3-latest-Linux-x86_64.sh && touch .conda\n\n.channels: .conda\n\texport PATH=${PBASE}/bin/bin:${PATH} && conda config --add channels defaults && conda config --add channels conda-forge && conda config --add channels bioconda && touch .channels\n\n.alfred: .conda .channels\n\texport PATH=${PBASE}/bin/bin:${PATH} && conda install alfred && touch .alfred\n\n.test: .conda .channels .alfred\n\texport PATH=${PBASE}/bin/bin:${PATH} && alfred --help && touch .test\n\nclean:\n\texport PATH=${PBASE}/bin/bin:${PATH} && conda remove alfred && rm .alfred\n\ndistclean:\n\trm -rf $(TARGETS) $(TARGETS:=.o) bin/\n\n.PHONY: distclean clean all\n" }, { "alpha_fraction": 0.6046016216278076, "alphanum_fraction": 0.6156030297279358, "avg_line_length": 34.22291564941406, "blob_id": "628f679ef93bfe7f7c54a34d556f40bf49a00476", "content_id": "d9bcd1fdb1fef973ff16b99ea1185717180ece7a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 16907, "license_type": "permissive", "max_line_length": 173, "num_lines": 480, "path": "/src/count_dna.h", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#ifndef COUNT_H\n#define COUNT_H\n\n#include <limits>\n\n#include <boost/icl/split_interval_map.hpp>\n#include <boost/dynamic_bitset.hpp>\n#include <boost/unordered_map.hpp>\n#include <boost/date_time/posix_time/posix_time.hpp>\n#include <boost/date_time/gregorian/gregorian.hpp>\n#include <boost/progress.hpp>\n\n#include <htslib/sam.h>\n\n#include 
\"version.h\"\n#include \"util.h\"\n\n\nnamespace bamstats\n{\n\n struct CountDNAConfig {\n bool fragments;\n uint32_t fraglow;\n uint32_t fraghigh; \n uint32_t window_size;\n uint32_t window_offset;\n uint32_t window_num;\n uint16_t minQual;\n bool hasIntervalFile;\n std::string sampleName;\n std::vector<bool> validChr;\n boost::filesystem::path bamFile;\n boost::filesystem::path outfile;\n boost::filesystem::path int_file;\n };\n\n struct ItvChr {\n int32_t start;\n int32_t end;\n std::string id;\n };\n\n template<typename TConfig> \n inline bool\n createIntervals(TConfig const& c, std::string const& chr, uint32_t const target_len, std::vector<ItvChr>& intvec) {\n if (c.hasIntervalFile) {\n if (!is_gz(c.int_file)) {\n\tstd::ifstream interval_file(c.int_file.string().c_str(), std::ifstream::in);\n\tif (interval_file.is_open()) {\n\t while (interval_file.good()) {\n\t std::string intervalLine;\n\t getline(interval_file, intervalLine);\n\t typedef boost::tokenizer< boost::char_separator<char> > Tokenizer;\n\t boost::char_separator<char> sep(\" \\t,;\");\n\t Tokenizer tokens(intervalLine, sep);\n\t Tokenizer::iterator tokIter = tokens.begin();\n\t if (tokIter!=tokens.end()) {\n\t std::string chrName=*tokIter++;\n\t if (chrName == chr) {\n\t\tif (tokIter!=tokens.end()) {\n\t\t ItvChr itv;\n\t\t itv.start = boost::lexical_cast<int32_t>(*tokIter++);\n\t\t itv.end = boost::lexical_cast<int32_t>(*tokIter++);\n\t\t if (itv.start < 0) {\n\t\t std::cerr << \"Interval start < 0\" << std::endl;\n\t\t return false;\n\t\t }\n\t\t if (itv.end < 0) {\n\t\t std::cerr << \"Interval end < 0\" << std::endl;\n\t\t return false;\n\t\t }\n\t\t if (itv.start >= itv.end) {\n\t\t std::cerr << \"Interval start > interval end\" << std::endl;\n\t\t return false;\n\t\t }\n\t\t itv.id = *tokIter;\n\t\t intvec.push_back(itv);\n\t\t}\n\t }\n\t }\n\t }\n\t interval_file.close();\n\t}\n } else {\n\tstd::ifstream file(c.int_file.string().c_str(), std::ios_base::in | std::ios_base::binary);\n\tboost::iostreams::filtering_streambuf<boost::iostreams::input> dataIn;\n\tdataIn.push(boost::iostreams::gzip_decompressor());\n\tdataIn.push(file);\n\tstd::istream instream(&dataIn);\n\tstd::string intervalLine;\n\twhile(std::getline(instream, intervalLine)) {\n\t typedef boost::tokenizer< boost::char_separator<char> > Tokenizer;\n\t boost::char_separator<char> sep(\" \\t,;\");\n\t Tokenizer tokens(intervalLine, sep);\n\t Tokenizer::iterator tokIter = tokens.begin();\n\t if (tokIter!=tokens.end()) {\n\t std::string chrName=*tokIter++;\n\t if (chrName == chr) {\n\t if (tokIter!=tokens.end()) {\n\t\tItvChr itv;\n\t\titv.start = boost::lexical_cast<int32_t>(*tokIter++);\n\t\titv.end = boost::lexical_cast<int32_t>(*tokIter++);\n\t\tif (itv.start < 0) {\n\t\t std::cerr << \"Interval start < 0\" << std::endl;\n\t\t return false;\n\t\t}\n\t\tif (itv.end < 0) {\n\t\t std::cerr << \"Interval end < 0\" << std::endl;\n\t\t return false;\n\t\t}\n\t\tif (itv.start >= itv.end) {\n\t\t std::cerr << \"Interval start > interval end\" << std::endl;\n\t\t return false;\n\t\t}\n\t\titv.id = *tokIter;\n\t\tintvec.push_back(itv);\n\t }\n\t }\n\t }\n\t}\n\tfile.close();\n }\n } else {\n // Create artificial intervals\n uint32_t pos = 0;\n unsigned int wSize = c.window_size;\n unsigned int wOffset = c.window_offset;\n if (c.window_num > 0) {\n\twSize=(target_len / c.window_num) + 1;\n\twOffset=wSize;\n }\n while (pos < target_len) {\n\tuint32_t window_len = pos+wSize;\n\tif (window_len > target_len) window_len = target_len;\n\tItvChr itv;\n\titv.start = 
pos;\n\titv.end = window_len;\n\titv.id = chr + \":\" + boost::lexical_cast<std::string>(itv.start) + \"-\" + boost::lexical_cast<std::string>(itv.end);\n\tintvec.push_back(itv);\n\tpos += wOffset;\n }\n }\n return true;\n }\n\n \n template<typename TConfig>\n inline int32_t\n bam_dna_counter(TConfig const& c) {\n \n // Load bam file\n samFile* samfile = sam_open(c.bamFile.string().c_str(), \"r\");\n hts_idx_t* idx = sam_index_load(samfile, c.bamFile.string().c_str());\n bam_hdr_t* hdr = sam_hdr_read(samfile);\n\n // Parse BAM file\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \" << \"BAM file parsing\" << std::endl;\n boost::progress_display show_progress( hdr->n_targets );\n\n // Mate map\n typedef boost::unordered_map<std::size_t, bool> TMateMap;\n TMateMap mateMap;\n\n // Open output file\n boost::iostreams::filtering_ostream dataOut;\n dataOut.push(boost::iostreams::gzip_compressor());\n dataOut.push(boost::iostreams::file_sink(c.outfile.string().c_str(), std::ios_base::out | std::ios_base::binary));\n dataOut << \"chr\\tstart\\tend\\tid\\t\" << c.sampleName << std::endl;\n \n // Iterate chromosomes\n for(int32_t refIndex=0; refIndex < (int32_t) hdr->n_targets; ++refIndex) {\n ++show_progress;\n\n // Any regions on this chromosome?\n if (!c.validChr[refIndex]) continue;\n\n // Check we have mapped reads on this chromosome\n bool nodata = true;\n std::string suffix(\"cram\");\n std::string str(c.bamFile.string());\n if ((str.size() >= suffix.size()) && (str.compare(str.size() - suffix.size(), suffix.size(), suffix) == 0)) nodata = false;\n uint64_t mapped = 0;\n uint64_t unmapped = 0;\n hts_idx_get_stat(idx, refIndex, &mapped, &unmapped);\n if (mapped) nodata = false;\n if (nodata) continue;\n\n // Coverage track\n typedef uint16_t TCount;\n uint32_t maxCoverage = std::numeric_limits<TCount>::max();\n typedef std::vector<TCount> TCoverage;\n TCoverage cov(hdr->target_len[refIndex], 0);\n \n // Count reads\n hts_itr_t* iter = sam_itr_queryi(idx, refIndex, 0, hdr->target_len[refIndex]);\n bam1_t* rec = bam_init1();\n int32_t lastAlignedPos = 0;\n std::set<std::size_t> lastAlignedPosReads;\n while (sam_itr_next(samfile, iter, rec) >= 0) {\n\tif (rec->core.flag & (BAM_FSECONDARY | BAM_FQCFAIL | BAM_FDUP | BAM_FSUPPLEMENTARY | BAM_FUNMAP)) continue;\n\tif ((rec->core.flag & BAM_FPAIRED) && ((rec->core.flag & BAM_FMUNMAP) || (rec->core.tid != rec->core.mtid))) continue;\n\tif (rec->core.qual < c.minQual) continue;\n\n\tif (rec->core.flag & BAM_FPAIRED) {\n\t // Clean-up the read store for identical alignment positions\n\t if (rec->core.pos > lastAlignedPos) {\n\t lastAlignedPosReads.clear();\n\t lastAlignedPos = rec->core.pos;\n\t }\n\t\n\t if ((rec->core.pos < rec->core.mpos) || ((rec->core.pos == rec->core.mpos) && (lastAlignedPosReads.find(hash_string(bam_get_qname(rec))) == lastAlignedPosReads.end()))) {\n\t // First read\n\t lastAlignedPosReads.insert(hash_string(bam_get_qname(rec)));\n\t std::size_t hv = hash_pair(rec);\n\t mateMap[hv] = true;\n\t } else {\n\t // Second read\n\t std::size_t hv = hash_pair_mate(rec);\n\t if ((mateMap.find(hv) == mateMap.end()) || (!mateMap[hv])) continue; // Mate discarded\n\t mateMap[hv] = false;\n\n\t // Count mid point\n\t if (c.fragments) {\n\t int32_t fraglen = rec->core.pos + alignmentLength(rec) - rec->core.mpos;\n\t if ((fraglen >= 0) && ((uint32_t) fraglen >= c.fraglow) && ((uint32_t) fraglen < c.fraghigh)) {\n\t\tint32_t fmidpoint = 
rec->core.mpos + fraglen / 2;\n\t\tif ((fmidpoint < (int32_t) hdr->target_len[refIndex]) && (cov[fmidpoint] < maxCoverage - 1)) ++cov[fmidpoint];\n\t }\n\t } else {\n\t int32_t midPoint = rec->core.pos + halfAlignmentLength(rec);\n\t if ((midPoint < (int32_t) hdr->target_len[refIndex]) && (cov[midPoint] < maxCoverage - 1)) ++cov[midPoint];\n\t }\n\t }\n\t} else {\n\t // Count mid point\n\t int32_t midPoint = rec->core.pos + halfAlignmentLength(rec);\n\t if ((midPoint < (int32_t) hdr->target_len[refIndex]) && (cov[midPoint] < maxCoverage - 1)) ++cov[midPoint];\n\t}\n }\n // Clean-up\n bam_destroy1(rec);\n hts_itr_destroy(iter);\n mateMap.clear();\n\n // Assign read counts\n std::vector<ItvChr> itv;\n if (!createIntervals(c, std::string(hdr->target_name[refIndex]), hdr->target_len[refIndex], itv)) {\n\tstd::cerr << \"Interval parsing failed!\" << std::endl;\n\treturn 1;\n }\n std::sort(itv.begin(), itv.end(), SortIntervalStart<ItvChr>());\n for(uint32_t i = 0; i < itv.size(); ++i) {\n\tuint64_t covsum = 0;\n\tfor(int32_t k = itv[i].start; k < itv[i].end; ++k) covsum += cov[k];\n\tdataOut << std::string(hdr->target_name[refIndex]) << \"\\t\" << itv[i].start << \"\\t\" << itv[i].end << \"\\t\" << itv[i].id << \"\\t\" << covsum << std::endl;\n }\n }\n\t \n // clean-up\n bam_hdr_destroy(hdr);\n hts_idx_destroy(idx);\n sam_close(samfile);\n dataOut.pop();\n \n return 0;\n }\n\n \n template<typename TConfig>\n inline int32_t\n countDNARun(TConfig const& c) {\n\n#ifdef PROFILE\n ProfilerStart(\"alfred.prof\");\n#endif\n\n int32_t retparse = bam_dna_counter(c);\n if (retparse != 0) {\n std::cerr << \"Error in read counting!\" << std::endl;\n return 1;\n }\n\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] Done.\" << std::endl;\n \n#ifdef PROFILE\n ProfilerStop();\n#endif\n\n return 0;\n }\n\n\n int count_dna(int argc, char **argv) {\n CountDNAConfig c;\n std::string fragmentString;\n\n // Parameter\n boost::program_options::options_description generic(\"Generic options\");\n generic.add_options()\n (\"help,?\", \"show help message\")\n (\"map-qual,m\", boost::program_options::value<uint16_t>(&c.minQual)->default_value(10), \"min. mapping quality\")\n (\"outfile,o\", boost::program_options::value<boost::filesystem::path>(&c.outfile)->default_value(\"cov.gz\"), \"coverage output file\")\n (\"fragments,f\", boost::program_options::value<std::string>(&fragmentString), \"count illumina PE fragments using lower and upper bound on insert size, i.e. 
-f 0,10000\")\n ;\n\n boost::program_options::options_description window(\"Window options\");\n window.add_options()\n (\"window-size,s\", boost::program_options::value<uint32_t>(&c.window_size)->default_value(10000), \"window size\")\n (\"window-offset,t\", boost::program_options::value<uint32_t>(&c.window_offset)->default_value(10000), \"window offset\")\n (\"window-num,n\", boost::program_options::value<uint32_t>(&c.window_num)->default_value(0), \"#windows per chr, used if #n>0\")\n (\"interval-file,i\", boost::program_options::value<boost::filesystem::path>(&c.int_file), \"interval file, used if present\")\n ;\n\n boost::program_options::options_description hidden(\"Hidden options\");\n hidden.add_options()\n (\"input-file\", boost::program_options::value<boost::filesystem::path>(&c.bamFile), \"input bam file\")\n ;\n\n boost::program_options::positional_options_description pos_args;\n pos_args.add(\"input-file\", -1);\n\n // Set the visibility\n boost::program_options::options_description cmdline_options;\n cmdline_options.add(generic).add(window).add(hidden);\n boost::program_options::options_description visible_options;\n visible_options.add(generic).add(window);\n\n // Parse command-line\n boost::program_options::variables_map vm;\n boost::program_options::store(boost::program_options::command_line_parser(argc, argv).options(cmdline_options).positional(pos_args).run(), vm);\n boost::program_options::notify(vm);\n\n // Check command line arguments\n if ((vm.count(\"help\")) || (!vm.count(\"input-file\"))) {\n std::cout << std::endl;\n std::cout << \"Usage: alfred \" << argv[0] << \" [OPTIONS] <aligned.bam>\" << std::endl;\n std::cout << visible_options << \"\\n\";\n return 1;\n }\n\n // Fragment midpoint counting\n if (vm.count(\"fragments\")) {\n c.fragments = true;\n std::vector<std::string> parts;\n boost::split(parts, fragmentString, boost::is_any_of(\",\"));\n if (parts.size() == 2) {\n\tint32_t fraglow = boost::lexical_cast<int32_t>(parts[0]);\n\tint32_t fraghigh = boost::lexical_cast<int32_t>(parts[1]);\n\tif ((fraglow >= 0) && (fraghigh >= 0) && (fraglow < fraghigh)) {\n\t c.fraglow = fraglow;\n\t c.fraghigh = fraghigh;\n\t} else {\n\t std::cerr << \"Lower bound needs to be smaller than upper bound for insert size and all bounds >= 0!\" << std::endl;\n\t return 1;\n\t}\n } else {\n\tstd::cerr << \"Could not parse lower and upper bound on insert size. 
Format is -f 50,1000 without any spaces before or after the comma!\" << std::endl;\n\treturn 1;\n }\n } else c.fragments = false;\n\n // Check bam file\n if (!(boost::filesystem::exists(c.bamFile) && boost::filesystem::is_regular_file(c.bamFile) && boost::filesystem::file_size(c.bamFile))) {\n std::cerr << \"Alignment file is missing: \" << c.bamFile.string() << std::endl;\n return 1;\n } else {\n samFile* samfile = sam_open(c.bamFile.string().c_str(), \"r\");\n if (samfile == NULL) {\n\tstd::cerr << \"Fail to open file \" << c.bamFile.string() << std::endl;\n\treturn 1;\n }\n hts_idx_t* idx = sam_index_load(samfile, c.bamFile.string().c_str());\n if (idx == NULL) {\n\tif (bam_index_build(c.bamFile.string().c_str(), 0) != 0) {\n\t std::cerr << \"Fail to open index for \" << c.bamFile.string() << std::endl;\n\t return 1;\n\t}\n }\n bam_hdr_t* hdr = sam_hdr_read(samfile);\n if (hdr == NULL) {\n\tstd::cerr << \"Fail to open header for \" << c.bamFile.string() << std::endl;\n\treturn 1;\n }\n\n // Get sample name\n std::string sampleName;\n if (!getSMTag(std::string(hdr->text), c.bamFile.stem().string(), sampleName)) {\n\tstd::cerr << \"Only one sample (@RG:SM) is allowed per input BAM file \" << c.bamFile.string() << std::endl;\n\treturn 1;\n } else c.sampleName = sampleName;\n\n // Check input intervals (if present)\n if (vm.count(\"interval-file\")) {\n\tc.validChr.resize(hdr->n_targets, false);\n\tif (!(boost::filesystem::exists(c.int_file) && boost::filesystem::is_regular_file(c.int_file) && boost::filesystem::file_size(c.int_file))) {\n\t std::cerr << \"Interval file is missing: \" << c.int_file.string() << std::endl;\n\t return 1;\n\t}\n\tstd::string oldChr;\n\tif (!is_gz(c.int_file)) {\n\t std::ifstream interval_file(c.int_file.string().c_str(), std::ifstream::in);\n\t if (interval_file.is_open()) {\n\t while (interval_file.good()) {\n\t std::string intervalLine;\n\t getline(interval_file, intervalLine);\n\t typedef boost::tokenizer< boost::char_separator<char> > Tokenizer;\n\t boost::char_separator<char> sep(\" \\t,;\");\n\t Tokenizer tokens(intervalLine, sep);\n\t Tokenizer::iterator tokIter = tokens.begin();\n\t if (tokIter!=tokens.end()) {\n\t\tstd::string chrName=*tokIter++;\n\t\tif (chrName.compare(oldChr) != 0) {\n\t\t oldChr = chrName;\n\t\t int32_t tid = bam_name2id(hdr, chrName.c_str());\n\t\t if ((tid < 0) || (tid >= (int32_t) hdr->n_targets)) {\n\t\t std::cerr << \"Interval file chromosome \" << chrName << \" is NOT present in your BAM file header \" << c.bamFile.string() << std::endl;\n\t\t return 1;\n\t\t }\n\t\t c.validChr[tid] = true;\n\t\t}\n\t }\n\t }\n\t interval_file.close();\n\t }\n\t} else {\n\t std::ifstream file(c.int_file.string().c_str(), std::ios_base::in | std::ios_base::binary);\n\t boost::iostreams::filtering_streambuf<boost::iostreams::input> dataIn;\n\t dataIn.push(boost::iostreams::gzip_decompressor());\n\t dataIn.push(file);\n\t std::istream instream(&dataIn);\n\t std::string intervalLine;\n\t while(std::getline(instream, intervalLine)) {\n\t typedef boost::tokenizer< boost::char_separator<char> > Tokenizer;\n\t boost::char_separator<char> sep(\" \\t,;\");\n\t Tokenizer tokens(intervalLine, sep);\n\t Tokenizer::iterator tokIter = tokens.begin();\n\t if (tokIter!=tokens.end()) {\n\t std::string chrName=*tokIter++;\n\t if (chrName.compare(oldChr) != 0) {\n\t\toldChr = chrName;\n\t\tint32_t tid = bam_name2id(hdr, chrName.c_str());\n\t\tif ((tid < 0) || (tid >= (int32_t) hdr->n_targets)) {\n\t\t std::cerr << \"Interval file chromosome \" << chrName << \" 
is NOT present in your BAM file header \" << c.bamFile.string() << std::endl;\n\t\t return 1;\n\t\t}\n\t\tc.validChr[tid] = true;\n\t }\n\t }\n\t }\n\t file.close();\n\t}\n\tc.hasIntervalFile= true;\n } else {\n\tc.validChr.resize(hdr->n_targets, true); // All chromosomes need to be parsed\n\tc.hasIntervalFile = false;\n }\n\n // Clean-up\n bam_hdr_destroy(hdr);\n hts_idx_destroy(idx);\n sam_close(samfile);\n }\n\n // Show cmd\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \";\n std::cout << \"alfred \";\n for(int i=0; i<argc; ++i) { std::cout << argv[i] << ' '; }\n std::cout << std::endl;\n\n return countDNARun(c);\n }\n\n \n}\n\n#endif\n" }, { "alpha_fraction": 0.5546218752861023, "alphanum_fraction": 0.5546218752861023, "avg_line_length": 28.75, "blob_id": "c78bee4b6aa9fece2068a607a402f3549bf0f82e", "content_id": "84f01c2fa69fad6e1b43097874282f5c3cab9646", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 595, "license_type": "permissive", "max_line_length": 100, "num_lines": 20, "path": "/docs/.vuepress/config.js", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "module.exports = {\n title: \"Alfred documentation\",\n description:\n \"Documentation of Alfred, an app for BAM alignment statistics, feature counting and annotation\",\n base: \"/docs/alfred/\",\n themeConfig: {\n repo: \"tobiasrausch/alfred\",\n nav: [\n { text: \"Home\", link: \"/\" },\n { text: \"Installation\", link: \"/installation/\" },\n { text: \"Usage\", link: \"/cli/\" },\n { text: \"Web App\", link: \"/webapp/\" },\n { text: \"FAQ\", link: \"/faq/\" }\n ],\n sidebar: [\"/installation/\", \"/cli/\", \"/webapp/\", \"/faq/\"]\n },\n plugins: {\n \"@vuepress/back-to-top\": true\n }\n};\n" }, { "alpha_fraction": 0.5074626803398132, "alphanum_fraction": 0.5895522236824036, "avg_line_length": 40.230770111083984, "blob_id": "cd98747ca6989b62bbae3b781f2cc8d02f14d40f", "content_id": "e00ffe6bcbea880a46840d123fc73d767055c7ad", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1072, "license_type": "permissive", "max_line_length": 159, "num_lines": 26, "path": "/scripts/rd.R", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "library(ggplot2)\nlibrary(scales)\n\nchrs = c(\"chr1\",\"chr2\",\"chr3\",\"chr4\",\"chr5\",\"chr6\",\"chr7\",\"chr8\",\"chr9\",\"chr10\",\"chr11\",\"chr12\",\"chr13\",\"chr14\",\"chr15\",\"chr16\",\"chr17\",\"chr18\",\"chr19\",\"chrX\")\n#chrs = c(\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\",\"11\",\"12\",\"13\",\"14\",\"15\",\"16\",\"17\",\"18\",\"19\",\"X\")\n\nargs = commandArgs(trailingOnly=TRUE)\nx = read.table(args[1], header=T)\nx = x[x$chr %in% chrs,]\nx$chr = factor(x$chr, levels=chrs)\n\n# Iterate samples\nfor (i in 5:ncol(x)) {\n sample = colnames(x)[i]\n print(sample)\n df = data.frame(chr=x$chr, start=x$start + (x$end - x$start) / 2, rd=log(x[,i] / median(x[,i]))/log(2))\n p1 = ggplot(data=df, aes(x=start, y=rd))\n p1 = p1 + geom_point(pch=21, size=0.5)\n p1 = p1 + xlab(\"Chromosome\")\n p1 = p1 + ylab(\"Log2 median normalized read depth\")\n p1 = p1 + scale_x_continuous(labels=comma)\n p1 = p1 + facet_grid(. 
~ chr, scales=\"free_x\", space=\"free_x\")\n p1 = p1 + theme(axis.text.x = element_text(angle=45, hjust=1))\n ggsave(paste0(sample, \".wholegenome.pdf\"), width=24, height=6)\n print(warnings())\n}\n" }, { "alpha_fraction": 0.6396396160125732, "alphanum_fraction": 0.642642617225647, "avg_line_length": 14.857142448425293, "blob_id": "b04ba7176de19d6556f0ef58c418c40ead94a1b5", "content_id": "42af8d16fe0cace8b1b5f1bb5fbc432f61919468", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 333, "license_type": "permissive", "max_line_length": 48, "num_lines": 21, "path": "/scripts/merge.py", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python\n\nimport gzip\nimport json\nimport sys\n\nret = {\n \"samples\": []\n}\n\ndef opn(fn):\n if fn.endswith('.gz'):\n return gzip.open(fn)\n return open(fn)\n\nfor file_name in sys.argv[1:]:\n with opn(file_name) as f:\n file_content = json.load(f)\n ret[\"samples\"].extend(file_content[\"samples\"])\n\nprint(json.dumps(ret))\n" }, { "alpha_fraction": 0.6349955797195435, "alphanum_fraction": 0.6481263041496277, "avg_line_length": 33.93814468383789, "blob_id": "9fe5c411e104b4f7b84a00a54d14a40cf0803e10", "content_id": "292f5829dc3ad33fb8580e55c7ba2b37d7ef89ca", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6778, "license_type": "permissive", "max_line_length": 175, "num_lines": 194, "path": "/src/pwalign.h", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#ifndef PWALIGN_H\n#define PWALIGN_H\n\n#include <boost/program_options/cmdline.hpp>\n#include <boost/program_options/options_description.hpp>\n#include <boost/program_options/parsers.hpp>\n#include <boost/program_options/variables_map.hpp>\n#include <boost/filesystem.hpp>\n#include <boost/tokenizer.hpp>\n#include <iostream>\n#include <vector>\n#include <htslib/vcf.h>\n#include <htslib/sam.h>\n#include <math.h>\n#include <stdio.h>\n\n#include <htslib/sam.h>\n#include <htslib/faidx.h>\n\n#include \"util.h\"\n#include \"align.h\"\n#include \"needle.h\"\n#include \"gotoh.h\"\n#include \"swneedle.h\"\n#include \"swgotoh.h\"\n\nnamespace bamstats {\n\n\nstruct PWAlignConsensus {\n bool seq1endsfree;\n bool seq2endsfree;\n bool localAlignment;\n bool nonACGTN;\n int32_t gapopen;\n int32_t gapext;\n int32_t match;\n int32_t mismatch;\n std::string format;\n boost::filesystem::path alignment;\n std::vector<boost::filesystem::path> inputfiles;\n};\n\n\nint pwalign(int argc, char **argv) {\n PWAlignConsensus c;\n\n // Parameter\n boost::program_options::options_description generic(\"Generic options\");\n generic.add_options()\n (\"help,?\", \"show help message\")\n (\"gapopen,g\", boost::program_options::value<int32_t>(&c.gapopen)->default_value(-10), \"gap open\")\n (\"gapext,e\", boost::program_options::value<int32_t>(&c.gapext)->default_value(-1), \"gap extension\")\n (\"match,m\", boost::program_options::value<int32_t>(&c.match)->default_value(5), \"match\")\n (\"mismatch,n\", boost::program_options::value<int32_t>(&c.mismatch)->default_value(-4), \"mismatch\")\n (\"endsfree1,p\", \"leading/trailing gaps free for seq1\")\n (\"endsfree2,q\", \"leading/trailing gaps free for seq2\")\n (\"local,l\", \"local alignment\")\n (\"ambiguous,k\", \"allow IUPAC ambiguity codes\")\n ;\n\n boost::program_options::options_description otp(\"Output options\");\n otp.add_options()\n (\"format,f\", 
boost::program_options::value<std::string>(&c.format)->default_value(\"h\"), \"output format [v|h]\")\n (\"alignment,a\", boost::program_options::value<boost::filesystem::path>(&c.alignment)->default_value(\"al.fa.gz\"), \"vertical/horizontal alignment\")\n ;\n \n boost::program_options::options_description hidden(\"Hidden options\");\n hidden.add_options()\n (\"input-file\", boost::program_options::value< std::vector<boost::filesystem::path> >(&c.inputfiles), \"input fasta file\")\n ;\n\n boost::program_options::positional_options_description pos_args;\n pos_args.add(\"input-file\", -1);\n\n boost::program_options::options_description cmdline_options;\n cmdline_options.add(generic).add(otp).add(hidden);\n boost::program_options::options_description visible_options;\n visible_options.add(generic).add(otp);\n boost::program_options::variables_map vm;\n boost::program_options::store(boost::program_options::command_line_parser(argc, argv).options(cmdline_options).positional(pos_args).run(), vm);\n boost::program_options::notify(vm);\n\n // Check command line arguments\n if ((vm.count(\"help\")) || (!vm.count(\"input-file\")) || (c.inputfiles.size() != 2)) {\n std::cout << \"Usage: alfred \" << argv[0] << \" [OPTIONS] <seq1.fasta> <seq2.fasta>\" << std::endl;\n std::cout << visible_options << \"\\n\";\n return 1;\n }\n\n // Flags\n if (vm.count(\"endsfree1\")) c.seq1endsfree = true;\n else c.seq1endsfree = false;\n if (vm.count(\"endsfree2\")) c.seq2endsfree = true;\n else c.seq2endsfree = false;\n if (vm.count(\"local\")) c.localAlignment = true;\n else c.localAlignment = false;\n if (vm.count(\"ambiguous\")) c.nonACGTN = true;\n else c.nonACGTN = false;\n \n // Check input files\n for(unsigned int file_c = 0; file_c < c.inputfiles.size(); ++file_c) {\n if (!(boost::filesystem::exists(c.inputfiles[file_c]) && boost::filesystem::is_regular_file(c.inputfiles[file_c]) && boost::filesystem::file_size(c.inputfiles[file_c]))) {\n std::cerr << \"Input fasta file is missing: \" << c.inputfiles[file_c].string() << std::endl;\n return 1;\n }\n }\n \n // Show cmd\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \";\n std::cout << \"alfred \";\n for(int i=0; i<argc; ++i) { std::cout << argv[i] << ' '; }\n std::cout << std::endl;\n\n // Load FASTA sequences\n std::string faname1;\n std::string seq1;\n if (!loadSingleFasta(c.inputfiles[0].string(), faname1, seq1, c.nonACGTN)) return 1;\n std::cout << \"Sequence1: \" << faname1 << \", Length: \" << seq1.size() << std::endl;\n std::string faname2;\n std::string seq2;\n if (!loadSingleFasta(c.inputfiles[1].string(), faname2, seq2, c.nonACGTN)) return 1;\n std::cout << \"Sequence2: \" << faname2 << \", Length: \" << seq2.size() << std::endl;\n \n // Alignment\n typedef boost::multi_array<char, 2> TAlign;\n TAlign align;\n DnaScore<int> sc(c.match, c.mismatch, c.gapopen, c.gapext);\n int32_t alScore = 0;\n if (c.localAlignment) {\n AlignConfig<false, false> alignconf;\n alScore = swGotoh(seq1, seq2, align, alignconf, sc);\n } else {\n if (c.seq1endsfree) {\n if (c.seq2endsfree) {\n\tAlignConfig<true, true> alignconf;\n\talScore = gotoh(seq1, seq2, align, alignconf, sc);\n } else {\n\tAlignConfig<true, false> alignconf;\n\talScore = gotoh(seq1, seq2, align, alignconf, sc);\n }\n } else {\n if (c.seq2endsfree) {\n\tAlignConfig<false, true> alignconf;\n\talScore = gotoh(seq1, seq2, align, alignconf, sc);\n } else {\n\tAlignConfig<false, false> alignconf;\n\talScore 
= gotoh(seq1, seq2, align, alignconf, sc);\n }\n }\n }\n std::cout << \"Alignment score: \" << alScore << std::endl;\n\n // Output\n if (c.format == \"h\") {\n boost::iostreams::filtering_ostream rcfile;\n rcfile.push(boost::iostreams::gzip_compressor());\n rcfile.push(boost::iostreams::file_sink(c.alignment.c_str(), std::ios_base::out | std::ios_base::binary));\n typedef typename TAlign::index TAIndex;\n for(TAIndex i = 0; i < (TAIndex) align.shape()[0]; ++i) {\n if (i == 0) rcfile << \">\" << faname1 << std::endl;\n else rcfile << \">\" << faname2 << std::endl;\n for(TAIndex j = 0; j < (TAIndex) align.shape()[1]; ++j) {\n\trcfile << align[i][j];\n }\n rcfile << std::endl;\n }\n rcfile.pop();\n } else {\n boost::iostreams::filtering_ostream rcfile;\n rcfile.push(boost::iostreams::gzip_compressor());\n rcfile.push(boost::iostreams::file_sink(c.alignment.c_str(), std::ios_base::out | std::ios_base::binary));\n typedef typename TAlign::index TAIndex;\n for(TAIndex j = 0; j < (TAIndex) align.shape()[1]; ++j) {\n for(TAIndex i = 0; i < (TAIndex) align.shape()[0]; ++i) {\n\trcfile << align[i][j];\n }\n rcfile << std::endl;\n }\n rcfile.pop();\n }\n \n // Done\n now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] Done.\" << std::endl;\n \n return 0;\n}\n\n\n}\n\n#endif\n" }, { "alpha_fraction": 0.6144734621047974, "alphanum_fraction": 0.6248997449874878, "avg_line_length": 36.607887268066406, "blob_id": "76f0ad650d3e3c0d20b38e1a4a8885ae253ba66a", "content_id": "daad79a62253c9fe9a28fc23b7f95265f64f7592", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 16209, "license_type": "permissive", "max_line_length": 171, "num_lines": 431, "path": "/src/annotate.h", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#ifndef ANNOTATE_H\n#define ANNOTATE_H\n\n#include <limits>\n\n#include <boost/icl/split_interval_map.hpp>\n#include <boost/dynamic_bitset.hpp>\n#include <boost/unordered_map.hpp>\n#include <boost/date_time/posix_time/posix_time.hpp>\n#include <boost/date_time/gregorian/gregorian.hpp>\n#include <boost/progress.hpp>\n\n#include <htslib/sam.h>\n#include <htslib/faidx.h>\n\n#include \"version.h\"\n#include \"util.h\"\n#include \"gtf.h\"\n#include \"gff3.h\"\n#include \"bed.h\"\n#include \"motif.h\"\n\nnamespace bamstats\n{\n\n struct AnnotateConfig {\n typedef std::map<std::string, int32_t> TChrMap;\n bool motifPosOut;\n bool nearest;\n bool overlappingHits;\n uint8_t inputFileFormat; // 0 = gtf, 1 = bed, 2 = gff3, 3 = motif file\n int32_t maxDistance;\n float motifScoreQuantile;\n TChrMap nchr;\n std::string idname;\n std::string feature;\n boost::filesystem::path motifFile;\n boost::filesystem::path genome;\n boost::filesystem::path gtfFile;\n boost::filesystem::path bedFile;\n boost::filesystem::path infile;\n boost::filesystem::path outpos;\n boost::filesystem::path outgene;\n boost::filesystem::path outfile;\n };\n\n\n template<typename TConfig, typename TGenomicRegions, typename TGeneIds>\n inline int32_t\n bed_anno(TConfig const& c, TGenomicRegions& gRegions, TGeneIds& geneIds) {\n typedef typename TGenomicRegions::value_type TChromosomeRegions;\n\n // Parse BED file\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \" << \"BED file parsing\" << std::endl;\n boost::progress_display show_progress(c.nchr.size());\n\n // Distance 
vector\n std::vector<int32_t> dist(geneIds.size(), 0);\n \n // Open output file\n std::ofstream ofile(c.outfile.string().c_str());\n ofile << \"chrom\\tstart\\tend\\tid\\tfeature\\tdistance\" << std::endl;\n\n // Peak count and names\n std::vector<std::string> peakNames;\n\n // Gene-level summary\n typedef std::pair<int32_t, int32_t> TDistPeak;\n typedef std::vector<TDistPeak> TPeaksPerGene;\n std::vector<TPeaksPerGene> geneView(geneIds.size(), TPeaksPerGene());\n \n // Iterate chromosomese\n for(int32_t refIndex=0; refIndex < (int32_t) c.nchr.size(); ++refIndex) {\n ++show_progress;\n\n // Sort by position\n std::sort(gRegions[refIndex].begin(), gRegions[refIndex].end(), SortIntervalStart<IntervalLabel>());\n\n // Flag feature positions\n typedef boost::dynamic_bitset<> TBitSet;\n TBitSet featureBitMap(250000000);\n for(uint32_t i = 0; i < gRegions[refIndex].size(); ++i)\n\tfor(int32_t k = gRegions[refIndex][i].start; k < gRegions[refIndex][i].end; ++k) featureBitMap[k] = 1;\n\n // Annotate intervals\n std::ifstream chrFile(c.infile.string().c_str(), std::ifstream::in);\n if (chrFile.is_open()) {\n\twhile (chrFile.good()) {\n\t std::string chrFromFile;\n\t getline(chrFile, chrFromFile);\n\t typedef boost::tokenizer< boost::char_separator<char> > Tokenizer;\n\t boost::char_separator<char> sep(\" \\t,;\");\n\t Tokenizer tokens(chrFromFile, sep);\n\t Tokenizer::iterator tokIter = tokens.begin();\n\t if (tokIter!=tokens.end()) {\n\t std::string chrName = *tokIter++;\n\t if (c.nchr.find(chrName)->second != refIndex) continue;\n\t int32_t start = boost::lexical_cast<int32_t>(*tokIter++);\n\t int32_t end = boost::lexical_cast<int32_t>(*tokIter++);\n\t {\n\t std::string name = \"Interval\" + boost::lexical_cast<std::string>(peakNames.size());\n\t if (tokIter != tokens.end()) name = *tokIter++;\n\t peakNames.push_back(name);\n\t }\n\t if (start >= end) continue; // Bed has right-open intervals\n\t typedef std::vector<int32_t> TFeaturePos;\n\t TFeaturePos featurepos;\n\t int32_t realstart = std::max(0, start - c.maxDistance);\n\t int32_t realend = std::min(250000000, end + c.maxDistance);\n\t for(int32_t i = realstart; i<realend; ++i)\n\t if (featureBitMap[i]) featurepos.push_back(i);\n\n\t // Find feature\n\t typedef std::set<int32_t> TFeatureIds;\n\t TFeatureIds featureid; // No feature by default\n\t if (!featurepos.empty()) {\n\t int32_t fpfirst = featurepos[0];\n\t int32_t fplast = featurepos[featurepos.size()-1];\n\t for(typename TChromosomeRegions::const_iterator vIt = gRegions[refIndex].begin(); vIt != gRegions[refIndex].end(); ++vIt) {\n\t\tif (vIt->end <= fpfirst) continue;\n\t\tif (vIt->start > fplast) break; // Sorted intervals so we can stop searching\n\t\tfor(TFeaturePos::const_iterator fIt = featurepos.begin(); fIt != featurepos.end(); ++fIt) {\n\t\t if ((vIt->start <= *fIt) && (vIt->end > *fIt)) {\n\t\t featureid.insert(vIt->lid);\n\n\t\t // Get distance\n\t\t int32_t locdist = 0;\n\t\t if (vIt->end < start) { locdist = vIt->end - start; }\n\t\t if (end < vIt->start) { locdist = vIt->start - end; }\n\t\t dist[vIt->lid] = locdist;\n\t\t break;\n\t\t }\n\t\t}\n\t }\n\t }\n\n\t // Output overlapping features\n\t int32_t bestDistance = 250000000;\n\t int32_t peakId = peakNames.size() - 1;\n\t ofile << chrName << \"\\t\" << start << \"\\t\" << end << \"\\t\" << peakNames[peakId] << \"\\t\";\n\t // Feature names\n\t if (featureid.empty()) {\n\t ofile << \"NA\";\n\t } else {\n\t for(typename TFeatureIds::const_iterator itF = featureid.begin(); itF != featureid.end(); ++itF) 
geneView[*itF].push_back(std::make_pair(dist[*itF], peakId));\n\t if (c.nearest) {\n\t\tint32_t bestFeature = 0;\n\t\tfor(typename TFeatureIds::const_iterator itF = featureid.begin(); itF != featureid.end(); ++itF) {\n\t\t if (std::abs(dist[*itF]) < std::abs(bestDistance)) {\n\t\t bestDistance = dist[*itF];\n\t\t bestFeature = *itF;\n\t\t }\n\t\t}\n\t\tofile << geneIds[bestFeature];\n\t } else {\n\t\tbool firstF = true;\n\t\tfor(typename TFeatureIds::const_iterator itF = featureid.begin(); itF != featureid.end(); ++itF) {\n\t\t if (!firstF) ofile << ',';\n\t\t else firstF = false;\n\t\t ofile << geneIds[*itF];\n\t\t}\n\t }\n\t }\n\t ofile << \"\\t\";\n\t // Feature distances\n\t if (featureid.empty()) {\n\t ofile << \"NA\";\n\t } else {\n\t if (c.nearest) {\n\t\tofile << bestDistance;\n\t } else {\n\t\tbool firstF = true;\n\t\tfor(typename TFeatureIds::const_iterator itF = featureid.begin(); itF != featureid.end(); ++itF) {\n\t\t if (!firstF) ofile << ',';\n\t\t else firstF = false;\n\t\t ofile << dist[*itF];\n\t\t}\n\t }\n\t }\n\t ofile << std::endl;\n\t }\n\t}\n\tchrFile.close();\n }\n }\n ofile.close();\n\n // Output gene-level view\n std::ofstream gfile(c.outgene.string().c_str());\n gfile << \"gene\\tpeak\\tdistance\" << std::endl;\n for(uint32_t i = 0; i < geneIds.size(); ++i) {\n if (!geneView[i].empty()) {\n\tif (c.nearest) {\n\t int32_t bestDistance = 250000000;\n\t int32_t bestIdx = -1;\n\t for(typename TPeaksPerGene::const_iterator itDP = geneView[i].begin(); itDP != geneView[i].end(); ++itDP) {\n\t if (std::abs(itDP->first) < std::abs(bestDistance)) {\n\t bestIdx = itDP->second;\n\t bestDistance = itDP->first;\n\t }\n\t }\n\t gfile << geneIds[i] << \"\\t\" << peakNames[bestIdx] << \"\\t\" << bestDistance << std::endl;\n\t} else {\n\t gfile << geneIds[i] << \"\\t\";\n\t std::sort(geneView[i].begin(), geneView[i].end());\n\t bool firstF = true;\n\t for(typename TPeaksPerGene::const_iterator itDP = geneView[i].begin(); itDP != geneView[i].end(); ++itDP) {\n\t if (!firstF) gfile << ',';\n\t else firstF = false;\n\t gfile << peakNames[itDP->second];\n\t }\n\t gfile << \"\\t\";\n\t firstF = true;\n\t for(typename TPeaksPerGene::const_iterator itDP = geneView[i].begin(); itDP != geneView[i].end(); ++itDP) {\n\t if (!firstF) gfile << ',';\n\t else firstF = false;\n\t gfile << itDP->first;\n\t }\n\t gfile << std::endl;\n\t}\n }\n }\n gfile.close();\n\n // Done\n return 0;\n }\n\n\n \n template<typename TConfig>\n inline int32_t\n annotateRun(TConfig const& c) {\n\n#ifdef PROFILE\n ProfilerStart(\"alfred.prof\");\n#endif\n\n // Parse GTF file\n typedef std::vector<IntervalLabel> TChromosomeRegions;\n typedef std::vector<TChromosomeRegions> TGenomicRegions;\n TGenomicRegions gRegions;\n gRegions.resize(c.nchr.size(), TChromosomeRegions());\n typedef std::vector<std::string> TGeneIds;\n TGeneIds geneIds;\n int32_t tf = 0;\n if (c.inputFileFormat == 0) tf = parseGTF(c, gRegions, geneIds);\n else if (c.inputFileFormat == 1) tf = parseBED(c, gRegions, geneIds);\n else if (c.inputFileFormat == 2) tf = parseGFF3(c, gRegions, geneIds);\n else if (c.inputFileFormat == 3) tf = parseJaspar(c, gRegions, geneIds);\n if (tf == 0) {\n std::cerr << \"Error parsing GTF/GFF3/BED file!\" << std::endl;\n std::cerr << \"Please check that the chromosome names agree (chr1 versus 1) between input and annotation file.\" << std::endl;\n return 1;\n }\n\n // Feature annotation\n int32_t retparse = bed_anno(c, gRegions, geneIds);\n if (retparse != 0) {\n std::cerr << \"Error in BED annotation!\" << std::endl;\n 
return 1;\n }\n \n // Done\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] Done.\" << std::endl;\n \n#ifdef PROFILE\n ProfilerStop();\n#endif\n\n return 0;\n }\n\n\n int annotate(int argc, char **argv) {\n AnnotateConfig c;\n\n // Parameter\n boost::program_options::options_description generic(\"Generic options\");\n generic.add_options()\n (\"help,?\", \"show help message\")\n (\"distance,d\", boost::program_options::value<int32_t>(&c.maxDistance)->default_value(0), \"max. distance (0: overlapping features only)\")\n (\"outgene,u\", boost::program_options::value<boost::filesystem::path>(&c.outgene)->default_value(\"gene.bed\"), \"gene/motif-level output\")\n (\"outfile,o\", boost::program_options::value<boost::filesystem::path>(&c.outfile)->default_value(\"anno.bed\"), \"annotated peaks output\")\n (\"nearest,n\", \"nearest feature only\")\n ;\n\n boost::program_options::options_description gtfopt(\"GTF/GFF3 annotation file options\");\n gtfopt.add_options()\n (\"gtf,g\", boost::program_options::value<boost::filesystem::path>(&c.gtfFile), \"gtf/gff3 file\")\n (\"id,i\", boost::program_options::value<std::string>(&c.idname)->default_value(\"gene_name\"), \"gtf/gff3 attribute\")\n (\"feature,f\", boost::program_options::value<std::string>(&c.feature)->default_value(\"gene\"), \"gtf/gff3 feature\")\n ;\n\n boost::program_options::options_description motifopt(\"Motif annotation file options\");\n motifopt.add_options()\n (\"motif,m\", boost::program_options::value<boost::filesystem::path>(&c.motifFile), \"motif file in jaspar or raw format\")\n (\"reference,r\", boost::program_options::value<boost::filesystem::path>(&c.genome), \"reference file\")\n (\"quantile,q\", boost::program_options::value<float>(&c.motifScoreQuantile)->default_value(0.95), \"motif quantile score [0,1]\")\n (\"position,p\", boost::program_options::value<boost::filesystem::path>(&c.outpos), \"gzipped output file of motif hits\")\n (\"exclude,x\", \"exclude overlapping hits of the same motif\")\n ;\n \n \n boost::program_options::options_description bedopt(\"BED annotation file options, columns chr, start, end, name\");\n bedopt.add_options()\n (\"bed,b\", boost::program_options::value<boost::filesystem::path>(&c.bedFile), \"bed file\")\n ;\n\n boost::program_options::options_description hidden(\"Hidden options\");\n hidden.add_options()\n (\"input-file\", boost::program_options::value<boost::filesystem::path>(&c.infile), \"input file\")\n ;\n\n boost::program_options::positional_options_description pos_args;\n pos_args.add(\"input-file\", -1);\n\n boost::program_options::options_description cmdline_options;\n cmdline_options.add(generic).add(gtfopt).add(bedopt).add(motifopt).add(hidden);\n boost::program_options::options_description visible_options;\n visible_options.add(generic).add(gtfopt).add(bedopt).add(motifopt);\n\n // Parse command-line\n boost::program_options::variables_map vm;\n boost::program_options::store(boost::program_options::command_line_parser(argc, argv).options(cmdline_options).positional(pos_args).run(), vm);\n boost::program_options::notify(vm);\n\n // Check command line arguments\n if ((vm.count(\"help\")) || (!vm.count(\"input-file\")) || ((!vm.count(\"gtf\")) && (!vm.count(\"bed\")) && (!vm.count(\"motif\")))) {\n std::cout << std::endl;\n std::cout << \"Usage: alfred \" << argv[0] << \" [OPTIONS] -g <hg19.gtf.gz> <peaks.bed>\" << std::endl;\n std::cout << \"Usage: alfred \" << argv[0] << \" 
[OPTIONS] -b <hg19.bed.gz> <peaks.bed>\" << std::endl;\n std::cout << \"Usage: alfred \" << argv[0] << \" [OPTIONS] -m <motif.jaspar.gz> -r <genome.fa> <peaks.bed>\" << std::endl;\n std::cout << visible_options << \"\\n\";\n return 1;\n }\n\n // Motif position\n if (vm.count(\"position\")) c.motifPosOut = true;\n else c.motifPosOut = false;\n\n // Nearest feature only\n if (vm.count(\"nearest\")) c.nearest = true;\n else c.nearest = false;\n\n // Overlapping motif hits\n if (vm.count(\"exclude\")) c.overlappingHits = false;\n else c.overlappingHits = true;\n \n // Input BED file\n if (!(boost::filesystem::exists(c.infile) && boost::filesystem::is_regular_file(c.infile) && boost::filesystem::file_size(c.infile))) {\n std::cerr << \"Input BED file is missing.\" << std::endl;\n return 1;\n } else {\n std::string oldChr = \"\";\n typedef std::set<std::string> TChrSet;\n TChrSet chrSet;\n std::ifstream chrFile(c.infile.string().c_str(), std::ifstream::in);\n if (chrFile.is_open()) {\n\twhile (chrFile.good()) {\n\t std::string chrFromFile;\n\t getline(chrFile, chrFromFile);\n\t typedef boost::tokenizer< boost::char_separator<char> > Tokenizer;\n\t boost::char_separator<char> sep(\" \\t,;\");\n\t Tokenizer tokens(chrFromFile, sep);\n\t Tokenizer::iterator tokIter = tokens.begin();\n\t if (tokIter!=tokens.end()) {\n\t std::string chrName = *tokIter++;\n\t if (chrName != oldChr) chrSet.insert(chrName);\n\t }\n\t}\n\tchrFile.close();\n }\n int32_t refIndex = 0;\n for(TChrSet::iterator itc = chrSet.begin(); itc != chrSet.end(); ++itc, ++refIndex) c.nchr.insert(std::make_pair(*itc, refIndex));\n }\n \n // Check region file\n if (!(boost::filesystem::exists(c.gtfFile) && boost::filesystem::is_regular_file(c.gtfFile) && boost::filesystem::file_size(c.gtfFile))) {\n if (!(boost::filesystem::exists(c.bedFile) && boost::filesystem::is_regular_file(c.bedFile) && boost::filesystem::file_size(c.bedFile))) {\n\tif (!(boost::filesystem::exists(c.motifFile) && boost::filesystem::is_regular_file(c.motifFile) && boost::filesystem::file_size(c.motifFile))) {\n\t std::cerr << \"Input gtf/bed/motif annotation file is missing.\" << std::endl;\n\t return 1;\n\t} else {\n\t c.inputFileFormat = 3;\n\t if ((!(vm.count(\"reference\"))) || (!(boost::filesystem::exists(c.genome) && boost::filesystem::is_regular_file(c.genome) && boost::filesystem::file_size(c.genome)))) {\n\t std::cerr << \"Motif annotation requires a reference genome file.\" << std::endl;\n\t return 1;\n\t } else {\n\t faidx_t* fai = fai_load(c.genome.string().c_str());\n\t if (fai == NULL) {\n\t if (fai_build(c.genome.string().c_str()) == -1) {\n\t\tstd::cerr << \"Fail to open genome fai index for \" << c.genome.string() << std::endl;\n\t\treturn 1;\n\t } else fai = fai_load(c.genome.string().c_str());\n\t }\n\t // Check that all chromosomes in input file are present\n\t for(typename AnnotateConfig::TChrMap::const_iterator itC = c.nchr.begin(); itC != c.nchr.end(); ++itC) {\n\t std::string chrName(itC->first);\n\t if (!faidx_has_seq(fai, chrName.c_str())) {\n\t\t std::cerr << \"Chromosome from bed file \" << chrName << \" is NOT present in your reference file \" << c.genome.string() << std::endl;\n\t\t return 1;\n\t }\n\t }\n\t fai_destroy(fai);\n\t }\n\t}\n } else c.inputFileFormat = 1;\n } else {\n if (is_gff3(c.gtfFile)) c.inputFileFormat = 2;\n else c.inputFileFormat = 0;\n }\n\n // Show cmd\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \";\n 
std::cout << \"alfred \";\n for(int i=0; i<argc; ++i) { std::cout << argv[i] << ' '; }\n std::cout << std::endl;\n\n return annotateRun(c);\n }\n \n\n\n \n}\n\n#endif\n" }, { "alpha_fraction": 0.6625916957855225, "alphanum_fraction": 0.7408313155174255, "avg_line_length": 66.83333587646484, "blob_id": "3ba11be98d5cab504395cc2994430177b033b25c", "content_id": "ee43bcb510c04960f54056639e396ae1c6d2715a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 409, "license_type": "permissive", "max_line_length": 102, "num_lines": 6, "path": "/gtf/downloadGTF.sh", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nwget ftp://ftp.ensembl.org/pub/grch37/current/gff3/homo_sapiens/Homo_sapiens.GRCh37.[0-9][0-9].gff3.gz\nwget ftp://ftp.ensembl.org/pub/grch37/current/gtf/homo_sapiens/Homo_sapiens.GRCh37.[0-9][0-9].gtf.gz\nwget ftp://ftp.ensembl.org/pub/current_gff3/homo_sapiens/Homo_sapiens.GRCh38.[0-9][0-9].gff3.gz\nwget ftp://ftp.ensembl.org/pub/current_gtf/homo_sapiens/Homo_sapiens.GRCh38.[0-9][0-9].gtf.gz\n\n\n" }, { "alpha_fraction": 0.6057504415512085, "alphanum_fraction": 0.6811203360557556, "avg_line_length": 44.15126037597656, "blob_id": "b2e96a418bbbd9387b61bd33009656d9d826b407", "content_id": "830c5985e718e8ebb8af54f379159f25a38f03f6", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 10747, "license_type": "permissive", "max_line_length": 212, "num_lines": 238, "path": "/example/example.sh", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nif [ $# -ne 1 ]\nthen\n echo \"Usage: $0 [tiny|full]\"\n exit -1\nfi\n\nSCRIPT=$(readlink -f \"$0\")\nBASEDIR=$(dirname \"$SCRIPT\")\n\nif [ ${1} == \"tiny\" ]\nthen\n # Run the E.coli example\n echo \"tiny example\"\n ${BASEDIR}/../src/alfred qc -r ${BASEDIR}/E.coli.fa.gz -j ecoli.json.gz ${BASEDIR}/E.coli.cram\n ${BASEDIR}/../src/alfred qc -r ${BASEDIR}/E.coli.fa.gz -o ecoli.tsv.gz ${BASEDIR}/E.coli.cram\n Rscript ${BASEDIR}/../scripts/stats.R ecoli.tsv.gz\n\nelif [ ${1} == \"full\" ]\nthen\n echo \"full example\"\n\n # Check dependency tools\n wget --version > /dev/null\n if [ $? -ne 0 ]\n then\n\techo \"\"\n\techo \"wget is required!\"\n\techo \"\"\n\texit\n fi\n samtools --version > /dev/null\n if [ $? -ne 0 ]\n then\n\techo \"\"\n\techo \"Samtools is required!\"\n\techo \"\"\n\texit\n fi\n\n # Download and index reference\n if [ ! -f GRCh38_full_analysis_set_plus_decoy_hla.fa ]\n then\n\twget 'ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/technical/reference/GRCh38_reference_genome/GRCh38_full_analysis_set_plus_decoy_hla.fa'\n\tsamtools faidx GRCh38_full_analysis_set_plus_decoy_hla.fa\n\twget 'ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/technical/reference/human_g1k_v37.fasta.gz'\n\tzcat human_g1k_v37.fasta.gz | sed 's/>\\([0-9XYM][0-9T]*\\) />chr\\1 /' | sed 's/>chrMT/>chrM/' > hg19.fa\n\trm human_g1k_v37.fasta.gz\n\tsamtools faidx hg19.fa\n fi\n\n # Download gene annotation\n if [ ! -f ${BASEDIR}/../gtf/Homo_sapiens.GRCh38.91.gtf.gz ]\n then\n\tcd ${BASEDIR}/../gtf/ && ./downloadGTF.sh && cd ${BASEDIR}\n fi\n\n # Generate exon target file\n if [ ! -f ${BASEDIR}/../maps/exonic.hg38.bed.gz ]\n then\n\tcd ${BASEDIR}/../maps/ && Rscript exon.R && cd ${BASEDIR}\n fi\n \n # Download 1000 Genomes exome cram file\n if [ ! 
-f HG00114.alt_bwamem_GRCh38DH.20150826.GBR.exome.cram ]\n then\n\twget 'ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/data_collections/1000_genomes_project/data/GBR/HG00114/exome_alignment/HG00114.alt_bwamem_GRCh38DH.20150826.GBR.exome.cram'\n\twget 'ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/data_collections/1000_genomes_project/data/GBR/HG00114/exome_alignment/HG00114.alt_bwamem_GRCh38DH.20150826.GBR.exome.cram.crai'\n fi\n\n # Run alfred\n ${BASEDIR}/../src/alfred qc -r GRCh38_full_analysis_set_plus_decoy_hla.fa -j HG00114.exome.illumina.json.gz -b ${BASEDIR}/../maps/exonic.hg38.bed.gz HG00114.alt_bwamem_GRCh38DH.20150826.GBR.exome.cram\n ${BASEDIR}/../src/alfred qc -r GRCh38_full_analysis_set_plus_decoy_hla.fa -o HG00114.exome.illumina.tsv.gz -b ${BASEDIR}/../maps/exonic.hg38.bed.gz HG00114.alt_bwamem_GRCh38DH.20150826.GBR.exome.cram\n Rscript ${BASEDIR}/../scripts/stats.R HG00114.exome.illumina.tsv.gz\n\nelif [ ${1} == \"benchmark\" ]\nthen\n\n ########\n # Requires a \"full\" run first to download required annotation and reference files\n ########\n\n # Exome\n if [ ! -f dna.exome.illumina.pe.ms.json.gz ]\n then\n\tfor SAMPLE in HG00110 HG00111 HG00112 HG00113 HG00114 HG00115\n\tdo\n\t # Download 1000 Genomes exome cram file\n\t if [ ! -f ${SAMPLE}.alt_bwamem_GRCh38DH.20150826.GBR.exome.cram ]\n\t then\n\t\twget \"ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/data_collections/1000_genomes_project/data/GBR/${SAMPLE}/exome_alignment/${SAMPLE}.alt_bwamem_GRCh38DH.20150826.GBR.exome.cram\"\n\t\twget \"ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/data_collections/1000_genomes_project/data/GBR/${SAMPLE}/exome_alignment/${SAMPLE}.alt_bwamem_GRCh38DH.20150826.GBR.exome.cram.crai\"\n\t fi\n\t ${BASEDIR}/../src/alfred qc -r GRCh38_full_analysis_set_plus_decoy_hla.fa -j ${SAMPLE}.exome.illumina.pe.json.gz -b ${BASEDIR}/../maps/exonic.hg38.bed.gz ${SAMPLE}.alt_bwamem_GRCh38DH.20150826.GBR.exome.cram\n\tdone\n\tpython ${BASEDIR}/../scripts/merge.py *.exome.illumina.pe.json.gz | gzip -c > dna.exome.illumina.pe.ms.json.gz\n\trm *.exome.illumina.pe.json.gz\n fi\n\n # WGS\n if [ ! -f dna.wgs.illumina.pe.ms.json.gz ]\n then\n\tfor SAMPLE in HG00512 HG00513\n\tdo\n\t if [ ! -f ${SAMPLE}.alt_bwamem_GRCh38DH.20150715.CHS.high_coverage.cram ]\n\t then\n\t\twget \"ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/data_collections/hgsv_sv_discovery/data/CHS/${SAMPLE}/high_cov_alignment/${SAMPLE}.alt_bwamem_GRCh38DH.20150715.CHS.high_coverage.cram\"\n\t\twget \"ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/data_collections/hgsv_sv_discovery/data/CHS/${SAMPLE}/high_cov_alignment/${SAMPLE}.alt_bwamem_GRCh38DH.20150715.CHS.high_coverage.cram.crai\"\n\t fi\n\t ${BASEDIR}/../src/alfred qc -r GRCh38_full_analysis_set_plus_decoy_hla.fa -j ${SAMPLE}.wgs.illumina.pe.json.gz ${SAMPLE}.alt_bwamem_GRCh38DH.20150715.CHS.high_coverage.cram\n\tdone\n\tpython ${BASEDIR}/../scripts/merge.py *.wgs.illumina.pe.json.gz | gzip -c > dna.wgs.illumina.pe.ms.json.gz\n\trm *.wgs.illumina.pe.json.gz\n fi\n\n # WGS Mate-pairs\n if [ ! -f dna.wgs.illumina.mp.ms.json.gz ]\n then\n\tfor SAMPLE in HG00512 HG00513\n\tdo\n\t if [ ! 
-f ${SAMPLE}.alt_bwamem_GRCh38DH.20150724.CHS.sv_7kb_mate.cram ]\n\t then\n\t\twget \"ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/data_collections/hgsv_sv_discovery/data/CHS/${SAMPLE}/sv_7kb_mate/${SAMPLE}.alt_bwamem_GRCh38DH.20150724.CHS.sv_7kb_mate.cram\"\n\t\twget \"ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/data_collections/hgsv_sv_discovery/data/CHS/${SAMPLE}/sv_7kb_mate/${SAMPLE}.alt_bwamem_GRCh38DH.20150724.CHS.sv_7kb_mate.cram.crai\"\n\t fi\n\t ${BASEDIR}/../src/alfred qc -r GRCh38_full_analysis_set_plus_decoy_hla.fa -j ${SAMPLE}.wgs.illumina.mp.json.gz ${SAMPLE}.alt_bwamem_GRCh38DH.20150724.CHS.sv_7kb_mate.cram\n\tdone\n\tpython ${BASEDIR}/../scripts/merge.py *.wgs.illumina.mp.json.gz | gzip -c > dna.wgs.illumina.mp.ms.json.gz\n\trm *.wgs.illumina.mp.json.gz\n fi\n\n # RNA-Seq, Geuvadis\n if [ ! -f rna.illumina.pe.ms.json.gz ]\n then\n\tfor SAMPLE in HG00096.1.M_111124_6 HG00101.1.M_111124_4 HG00104.1.M_111124_5 HG00117.1.M_111124_2 HG00121.1.M_111124_7 \n\tdo\n\t if [ ! -f ${SAMPLE}.bam ]\n\t then\n\t\twget \"https://www.ebi.ac.uk/arrayexpress/files/E-GEUV-1/${SAMPLE}.bam\"\n\t\tsamtools index ${SAMPLE}.bam\n\t fi\n\t ${BASEDIR}/../src/alfred qc -a ${SAMPLE} -r hg19.fa -j ${SAMPLE}.rna.illumina.pe.json.gz ${SAMPLE}.bam\n\tdone\n\tpython ${BASEDIR}/../scripts/merge.py *.rna.illumina.pe.json.gz | gzip -c > rna.illumina.pe.ms.json.gz\n\trm *.rna.illumina.pe.json.gz\n fi\n\n # Hi-C\n if [ ! -f hic.illumina.pe.ms.json.gz ]\n then\n\tfor SAMPLE in HG00732 HG00733\n\tdo\n\t if [ ! -f ${SAMPLE}_Hi-C_biorep2_merged_filtered.bam ]\n\t then\n\t\twget \"ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/data_collections/hgsv_sv_discovery/working/20160822_HiC_bam_files/${SAMPLE}_Hi-C_biorep2_merged_filtered.bam\"\n\t\tsamtools index ${SAMPLE}_Hi-C_biorep2_merged_filtered.bam\n\t fi\n\t ${BASEDIR}/../src/alfred qc -r GRCh38_full_analysis_set_plus_decoy_hla.fa -j ${SAMPLE}.hic.illumina.pe.json.gz ${SAMPLE}_Hi-C_biorep2_merged_filtered.bam\n\tdone\n\tpython ${BASEDIR}/../scripts/merge.py *.hic.illumina.pe.json.gz | gzip -c > hic.illumina.pe.ms.json.gz\n\trm *.hic.illumina.pe.json.gz\n fi\n\n # ONT\n if [ ! -f dna.wgs.ont.se.ms.json.gz ]\n then\n\tif [ -f /opt/dev/HG00733/HG00733.bam ]\n\tthen\n\t ${BASEDIR}/../src/alfred qc -r GRCh38_full_analysis_set_plus_decoy_hla.fa -j dna.wgs.ont.se.ms.json.gz /opt/dev/HG00733/HG00733.bam\n\tfi\n fi\n\n # PacBio\n if [ ! -f dna.wgs.pacbio.se.ms.json.gz ]\n then\n\tfor SAMPLE in NA19238 NA19239\n\tdo\n\t if [ ! -f ${SAMPLE}_bwamem_GRCh38DH_YRI_20160905_pacbio.bam ]\n\t then\n\t\twget \"ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/data_collections/hgsv_sv_discovery/working/20160905_smithm_pacbio_aligns/${SAMPLE}_bwamem_GRCh38DH_YRI_20160905_pacbio.bam\"\n\t\twget \"ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/data_collections/hgsv_sv_discovery/working/20160905_smithm_pacbio_aligns/${SAMPLE}_bwamem_GRCh38DH_YRI_20160905_pacbio.bam.bai\"\n\t fi\n\t ${BASEDIR}/../src/alfred qc -r GRCh38_full_analysis_set_plus_decoy_hla.fa -j ${SAMPLE}.dna.wgs.pacbio.se.json.gz ${SAMPLE}_bwamem_GRCh38DH_YRI_20160905_pacbio.bam\n\tdone\n\tpython ${BASEDIR}/../scripts/merge.py *.dna.wgs.pacbio.se.json.gz | gzip -c > dna.wgs.pacbio.se.ms.json.gz\n\trm *.dna.wgs.pacbio.se.json.gz\n fi\n\n # ATAC-Seq\n if [ ! 
-f atac.illumina.pe.ms.json.gz ]\n then\n\tfor SAMPLE in atac1 atac2\n\tdo\n\t ${BASEDIR}/../src/alfred qc -a ${SAMPLE} -r hs37d5.fa -j ${SAMPLE}.atac.illumina.pe.json.gz ${SAMPLE}.bam\n\tdone\n\tpython ${BASEDIR}/../scripts/merge.py *.atac.illumina.pe.json.gz | gzip -c > atac.illumina.pe.ms.json.gz\n\trm *.atac.illumina.pe.json.gz\n fi\n\n # ChIP-Seq, Encode\n if [ ! -f chip.illumina.se.ms.json.gz ]\n then\n\tfor SAMPLE in H3K27ac H3K27me3 H3K9ac\n\tdo\n\t ${BASEDIR}/../src/alfred qc -a GM12878_${SAMPLE} -r hs37d5.fa -j ${SAMPLE}.chip.illumina.se.json.gz ${SAMPLE}.bam\n\tdone\n\tpython ${BASEDIR}/../scripts/merge.py *.chip.illumina.se.json.gz | gzip -c > chip.illumina.se.ms.json.gz\n\trm *.chip.illumina.se.json.gz\n fi\nelif [ ${1} == \"runtime\" ]\nthen\n # RNA benchmark\n zcat ${BASEDIR}/../gtf/Homo_sapiens.GRCh37.75.gtf.gz | grep \"^#\" | gzip -c > Homo_sapiens.GRCh37.75.chr.gtf.gz\n zcat ${BASEDIR}/../gtf/Homo_sapiens.GRCh37.75.gtf.gz | grep -v \"^#\" | grep -P \"^[0-9XY]*\\t\" | sed 's/^/chr/' | gzip -c >> Homo_sapiens.GRCh37.75.chr.gtf.gz\n /usr/bin/time -v ${BASEDIR}/../src/alfred count_rna -g Homo_sapiens.GRCh37.75.chr.gtf.gz HG00104.1.M_111124_5.bam\n gunzip Homo_sapiens.GRCh37.75.chr.gtf.gz\n /usr/bin/time -v ./qualimap_v2.2.1/qualimap comp-counts -pe -id gene_id -type exon -out qualimap.count -gtf Homo_sapiens.GRCh37.75.chr.gtf -bam HG00104.1.M_111124_5.bam\n /usr/bin/time -v htseq-count -s no -f bam -r pos HG00104.1.M_111124_5.bam Homo_sapiens.GRCh37.75.chr.gtf > htseq.count\n rm Homo_sapiens.GRCh37.75.chr.gtf\n\n # DNA benchmark\n zcat ${BASEDIR}/../maps/exonic.hg19.bed.gz | sed 's/^chr//' | gzip -c > exonic.hg19.bed.gz\n # QC\n /usr/bin/time -v ${BASEDIR}/../src/alfred qc -j qc.json.gz -b exonic.hg19.bed.gz -r 1kGP.fa HG00111.mapped.ILLUMINA.bwa.GBR.exome.20120522.bam\n /usr/bin/time -v ./qualimap_v2.2.1/qualimap bamqc --java-mem-size=4G -nt 1 -bam HG00111.mapped.ILLUMINA.bwa.GBR.exome.20120522.bam -sd -gd \"HUMAN - hg19\" -outformat PDF:HTML\n # Window counting\n /usr/bin/time -v ${BASEDIR}/../src/alfred count_dna -i exonic.hg19.bed.gz -m 0 HG00111.mapped.ILLUMINA.bwa.GBR.exome.20120522.bam\n /usr/bin/time -v bedtools multicov -p -bams HG00111.mapped.ILLUMINA.bwa.GBR.exome.20120522.bam -bed exonic.hg19.bed.gz > bedtools.count\n rm exonic.hg19.bed.gz\n\n # Browser Tracks\n /usr/bin/time -v ${BASEDIR}/../src/alfred tracks HG00111.mapped.ILLUMINA.bwa.GBR.exome.20120522.bam\n /usr/bin/time -v makeTagDirectory tagdir -genome 1kGP.fa HG00111.mapped.ILLUMINA.bwa.GBR.exome.20120522.bam\n /usr/bin/time -v makeUCSCfile tagdir -style dnase -fsize 5e7 -o homer.bedGraph\nelse\n echo \"Unknown mode ${1}\"\nfi\n\n" }, { "alpha_fraction": 0.5744184851646423, "alphanum_fraction": 0.5891146659851074, "avg_line_length": 36.44402313232422, "blob_id": "e02dea972369c9557a4eaea22b45311bac224e4b", "content_id": "36de765a98f1c142d7bfe1d67756781b2efe1679", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 19733, "license_type": "permissive", "max_line_length": 264, "num_lines": 527, "path": "/src/tracks.h", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#ifndef TRACKS_H\n#define TRACKS_H\n\n#include <limits>\n\n#include <boost/icl/split_interval_map.hpp>\n#include <boost/dynamic_bitset.hpp>\n#include <boost/unordered_map.hpp>\n#include <boost/date_time/posix_time/posix_time.hpp>\n#include <boost/date_time/gregorian/gregorian.hpp>\n#include <boost/progress.hpp>\n\n#include <htslib/sam.h>\n\n#include 
\"version.h\"\n#include \"util.h\"\n\n\nnamespace bamstats\n{\n\n struct TrackConfig {\n bool wiggleFormat;\n uint16_t covtype; // 0: sequencing coverage, 1: spanning coverage, 2: footprints\n uint16_t minQual;\n uint32_t normalize;\n float resolution;\n std::string sampleName;\n std::string format;\n boost::filesystem::path bamFile;\n boost::filesystem::path outfile;\n };\n\n struct Track {\n uint32_t start;\n uint32_t end;\n double score;\n Track(uint32_t s, uint32_t e, double sc) : start(s), end(e), score(sc) {}\n };\n \n template<typename TConfig>\n inline int32_t\n create_tracks(TConfig const& c) {\n // Load bam file\n samFile* samfile = sam_open(c.bamFile.string().c_str(), \"r\");\n hts_idx_t* idx = sam_index_load(samfile, c.bamFile.string().c_str());\n bam_hdr_t* hdr = sam_hdr_read(samfile);\n\n // Pair qualities and features\n typedef boost::unordered_map<std::size_t, uint8_t> TQualities;\n TQualities qualities;\n\n // Normalize read-counts\n double normFactor = 1;\n if (c.normalize) {\n uint64_t totalPairs = 0;\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \" << \"Total read count normalization\" << std::endl;\n boost::progress_display show_progress( hdr->n_targets );\n for(int32_t refIndex=0; refIndex < (int32_t) hdr->n_targets; ++refIndex) {\n\t++show_progress;\n\n\thts_itr_t* iter = sam_itr_queryi(idx, refIndex, 0, hdr->target_len[refIndex]);\n\tbam1_t* rec = bam_init1();\n\tint32_t lastAlignedPos = 0;\n\tstd::set<std::size_t> lastAlignedPosReads;\n\twhile (sam_itr_next(samfile, iter, rec) >= 0) {\n\t if (rec->core.flag & (BAM_FSECONDARY | BAM_FQCFAIL | BAM_FDUP | BAM_FSUPPLEMENTARY | BAM_FUNMAP)) continue;\n\t if (rec->core.qual < c.minQual) continue;\n\t if (rec->core.flag & BAM_FPAIRED) {\n\t if ((rec->core.flag & BAM_FMUNMAP) || (rec->core.tid != rec->core.mtid)) continue;\n\t \n\t // Clean-up the read store for identical alignment positions\n\t if (rec->core.pos > lastAlignedPos) {\n\t lastAlignedPosReads.clear();\n\t lastAlignedPos = rec->core.pos;\n\t }\n\t\n\t if ((rec->core.pos < rec->core.mpos) || ((rec->core.pos == rec->core.mpos) && (lastAlignedPosReads.find(hash_string(bam_get_qname(rec))) == lastAlignedPosReads.end()))) {\n\t // First read\n\t lastAlignedPosReads.insert(hash_string(bam_get_qname(rec)));\n\t std::size_t hv = hash_pair(rec);\n\t qualities[hv] = rec->core.qual;\n\t } else {\n\t // Second read\n\t std::size_t hv = hash_pair_mate(rec);\n\t if (qualities.find(hv) == qualities.end()) continue; // Mate discarded\n\t uint8_t pairQuality = std::min((uint8_t) qualities[hv], (uint8_t) rec->core.qual);\n\t qualities[hv] = 0;\n\t \n\t // Pair quality\n\t if (pairQuality < c.minQual) continue; // Low quality pair\n\t \n\t \n\t // Get bases\n\t uint32_t* cigar = bam_get_cigar(rec);\n\t for (std::size_t i = 0; i < rec->core.n_cigar; ++i) {\n\t\tif ((bam_cigar_op(cigar[i]) == BAM_CMATCH) || (bam_cigar_op(cigar[i]) == BAM_CEQUAL) || (bam_cigar_op(cigar[i]) == BAM_CDIFF)) {\n\t\t totalPairs += bam_cigar_oplen(cigar[i]);\n\t\t}\n\t }\n\t }\n\t } else {\n\t // Single end\n\t uint32_t* cigar = bam_get_cigar(rec);\n\t for (std::size_t i = 0; i < rec->core.n_cigar; ++i) {\n\t if ((bam_cigar_op(cigar[i]) == BAM_CMATCH) || (bam_cigar_op(cigar[i]) == BAM_CEQUAL) || (bam_cigar_op(cigar[i]) == BAM_CDIFF)) {\n\t\ttotalPairs += bam_cigar_oplen(cigar[i]);\n\t }\n\t }\n\t }\n\t}\n\t// Clean-up\n\tbam_destroy1(rec);\n\thts_itr_destroy(iter);\n\tqualities.clear();\n }\n // 
Normalize to 100bp paired-end reads\n normFactor = ((double) ((uint64_t) (c.normalize)) / (double) totalPairs) * 100 * 2;\n }\n \n // Open output file\n boost::iostreams::filtering_ostream dataOut;\n dataOut.push(boost::iostreams::gzip_compressor());\n dataOut.push(boost::iostreams::file_sink(c.outfile.string().c_str(), std::ios_base::out | std::ios_base::binary));\n if (c.format == \"bedgraph\") {\n // bedgraph\n dataOut << \"track type=bedGraph name=\\\"\" << c.sampleName << \"\\\" description=\\\"\" << c.sampleName << \"\\\" visibility=full color=44,162,95\" << std::endl;\n } else if (c.format == \"wiggle\") {\n // wiggle\n dataOut << \"track type=wiggle_0 name=\\\"\" << c.sampleName << \"\\\" description=\\\"\" << c.sampleName << \"\\\" visibility=full color=44,162,95\" << std::endl; \n } else if (c.format == \"raw\") {\n dataOut << \"chr\\tpos\\t\" << c.sampleName << std::endl;\n } else {\n // bed\n dataOut << \"chr\\tstart\\tend\\tid\\t\" << c.sampleName << std::endl;\n }\n\n // Iterate chromosomes\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \" << \"BAM file parsing\" << std::endl;\n boost::progress_display show_progress( hdr->n_targets );\n for(int32_t refIndex=0; refIndex < (int32_t) hdr->n_targets; ++refIndex) {\n ++show_progress;\n if (c.wiggleFormat) dataOut << \"fixedStep chrom=\" << hdr->target_name[refIndex] << \" start=1 step=1\" << std::endl;\n\n // Find valid pairs\n std::set<std::size_t> validPairs;\n {\n\thts_itr_t* iter = sam_itr_queryi(idx, refIndex, 0, hdr->target_len[refIndex]);\n\tbam1_t* rec = bam_init1();\n\tint32_t lastAlignedPos = 0;\n\tstd::set<std::size_t> lastAlignedPosReads;\n\twhile (sam_itr_next(samfile, iter, rec) >= 0) {\n\t if (rec->core.flag & (BAM_FSECONDARY | BAM_FQCFAIL | BAM_FDUP | BAM_FSUPPLEMENTARY | BAM_FUNMAP)) continue;\n\t if (rec->core.qual < c.minQual) continue;\n\t if (rec->core.flag & BAM_FPAIRED) {\n\t if ((rec->core.flag & BAM_FMUNMAP) || (rec->core.tid != rec->core.mtid)) continue;\n\n\t // Clean-up the read store for identical alignment positions\n\t if (rec->core.pos > lastAlignedPos) {\n\t lastAlignedPosReads.clear();\n\t lastAlignedPos = rec->core.pos;\n\t }\n\t \n\t if ((rec->core.pos < rec->core.mpos) || ((rec->core.pos == rec->core.mpos) && (lastAlignedPosReads.find(hash_string(bam_get_qname(rec))) == lastAlignedPosReads.end()))) {\n\t // First read\n\t lastAlignedPosReads.insert(hash_string(bam_get_qname(rec)));\n\t std::size_t hv = hash_pair(rec);\n\t qualities[hv] = rec->core.qual;\n\t } else {\n\t // Second read\n\t std::size_t hv = hash_pair_mate(rec);\n\t if (qualities.find(hv) == qualities.end()) continue; // Mate discarded\n\t uint8_t pairQuality = std::min((uint8_t) qualities[hv], (uint8_t) rec->core.qual);\n\t qualities[hv] = 0;\n\n\t // Pair quality\n\t if (pairQuality < c.minQual) continue; // Low quality pair\n\n\t // Insert valid pair\n\t validPairs.insert(hash_pair_mate(rec));\n\t }\n\t } else {\n\t // Single-end\n\t validPairs.insert(hash_read(rec));\n\t }\n\t}\n\t// Clean-up\n\tbam_destroy1(rec);\n\thts_itr_destroy(iter);\n\tqualities.clear();\n }\n\n // Create Coverage track\n typedef uint16_t TCount;\n uint32_t maxCoverage = std::numeric_limits<TCount>::max();\n typedef std::vector<TCount> TCoverage;\n TCoverage cov;\n TCoverage scov;\n if (c.covtype == 0) cov.resize(hdr->target_len[refIndex], 0);\n else {\n\tcov.resize(hdr->target_len[refIndex], 0);\n\tscov.resize(hdr->target_len[refIndex], 0);\n }\n if 
(validPairs.size()) {\n\thts_itr_t* iter = sam_itr_queryi(idx, refIndex, 0, hdr->target_len[refIndex]);\n\tbam1_t* rec = bam_init1();\n\tint32_t lastAlignedPos = 0;\n\tstd::set<std::size_t> lastAlignedPosReads;\n\twhile (sam_itr_next(samfile, iter, rec) >= 0) {\n\t if (rec->core.flag & (BAM_FSECONDARY | BAM_FQCFAIL | BAM_FDUP | BAM_FSUPPLEMENTARY | BAM_FUNMAP)) continue;\n\t if (rec->core.qual < c.minQual) continue;\n\t if (rec->core.flag & BAM_FPAIRED) {\n\t if ((rec->core.flag & BAM_FMUNMAP) || (rec->core.tid != rec->core.mtid)) continue;\n\n\t // Clean-up the read store for identical alignment positions\n\t if (rec->core.pos > lastAlignedPos) {\n\t lastAlignedPosReads.clear();\n\t lastAlignedPos = rec->core.pos;\n\t }\n\n\t if (c.covtype > 0) {\n\t // Spanning coverage\n\t if ((rec->core.pos < rec->core.mpos) || ((rec->core.pos == rec->core.mpos) && (lastAlignedPosReads.find(hash_string(bam_get_qname(rec))) == lastAlignedPosReads.end()))) {\n\t\t// Do nothing\n\t } else {\n\t\tstd::size_t hv = hash_pair_mate(rec);\n\t\tif ((validPairs.find(hv) != validPairs.end()) && (layout(rec) == 2)) {\n\t\t int32_t pStart = rec->core.mpos + 50;\n\t\t int32_t pEnd = rec->core.pos - 50;\n\t\t if (pStart < pEnd) {\n\t\t for(int32_t i = pStart; i < pEnd; ++i) {\n\t\t if (scov[i] < maxCoverage) ++scov[i];\n\t\t }\n\t\t }\n\t\t}\n\t }\n\t }\n\t if ((c.covtype == 0) || (c.covtype == 2)) {\n\t // Sequence coverage\n\t std::size_t hv = 0;\n\t if ((rec->core.pos < rec->core.mpos) || ((rec->core.pos == rec->core.mpos) && (lastAlignedPosReads.find(hash_string(bam_get_qname(rec))) == lastAlignedPosReads.end()))) hv = hash_pair(rec);\n\t else hv = hash_pair_mate(rec);\n\t if (validPairs.find(hv) != validPairs.end()) {\n\n\t\t// Reference pointer\n\t\tuint32_t rp = rec->core.pos;\n\t \n\t\t// Parse the CIGAR\n\t\tuint32_t* cigar = bam_get_cigar(rec);\n\t\tfor (std::size_t i = 0; i < rec->core.n_cigar; ++i) {\n\t\t if ((bam_cigar_op(cigar[i]) == BAM_CMATCH) || (bam_cigar_op(cigar[i]) == BAM_CEQUAL) || (bam_cigar_op(cigar[i]) == BAM_CDIFF)) {\n\t\t // match or mismatch\n\t\t for(std::size_t k = 0; k<bam_cigar_oplen(cigar[i]);++k) {\n\t\t if (cov[rp] < maxCoverage) ++cov[rp];\n\t\t ++rp;\n\t\t }\n\t\t }\n\t\t else if (bam_cigar_op(cigar[i]) == BAM_CDEL) rp += bam_cigar_oplen(cigar[i]);\n\t\t else if (bam_cigar_op(cigar[i]) == BAM_CREF_SKIP) rp += bam_cigar_oplen(cigar[i]);\n\t\t}\n\t }\n\t }\n\t } else {\n\t // Single end, only sequencing coverage is possible\n\t if (c.covtype == 0) {\n\t std::size_t hv = hash_read(rec);\n\t if (validPairs.find(hv) != validPairs.end()) {\n\t\t\n\t\t// Reference pointer\n\t\tuint32_t rp = rec->core.pos;\n\t\t\n\t\t// Parse the CIGAR\n\t\tuint32_t* cigar = bam_get_cigar(rec);\n\t\tfor (std::size_t i = 0; i < rec->core.n_cigar; ++i) {\n\t\t if ((bam_cigar_op(cigar[i]) == BAM_CMATCH) || (bam_cigar_op(cigar[i]) == BAM_CEQUAL) || (bam_cigar_op(cigar[i]) == BAM_CDIFF)) {\n\t\t // match or mismatch\n\t\t for(std::size_t k = 0; k<bam_cigar_oplen(cigar[i]);++k) {\n\t\t if (cov[rp] < maxCoverage) ++cov[rp];\n\t\t ++rp;\n\t\t }\n\t\t }\n\t\t else if (bam_cigar_op(cigar[i]) == BAM_CDEL) rp += bam_cigar_oplen(cigar[i]);\n\t\t else if (bam_cigar_op(cigar[i]) == BAM_CREF_SKIP) rp += bam_cigar_oplen(cigar[i]);\n\t\t}\n\t }\n\t }\n\t }\n\t}\n\t// Clean-up\n\tbam_destroy1(rec);\n\thts_itr_destroy(iter);\n\n\t// Coverage mangling\n\tif (c.covtype == 1) {\n\t // Spanning coverage\n\t for(uint32_t i = 0; i < scov.size(); ++i) cov[i] = scov[i];\n\t} else if (c.covtype == 2) {\n\t // slope window\n\t 
uint32_t slopewin = 85;\n\t TCoverage covStore(slopewin, 0);\t \n\t for(uint32_t i = 0; i < scov.size(); ++i) {\n\t TCount oldCovVal = covStore[i % 50];\n\t covStore[i % 50] = cov[i];\n\t if ((i < slopewin) || (i + slopewin >= scov.size())) cov[i] = 0;\n\t else {\n\t int32_t spanPeakLeft = (int32_t) scov[i] - (int32_t) scov[i - slopewin];\n\t int32_t spanPeakRight = (int32_t) scov[i] - (int32_t) scov[i + slopewin];\n\t int32_t spanPeak = (spanPeakLeft + spanPeakRight) / 2;\n\t int32_t covPeakLeft = (int32_t) cov[i] - (int32_t) oldCovVal;\n\t int32_t covPeakRight = (int32_t) cov[i] - (int32_t) cov[i + slopewin];\n\t int32_t covPeak = (covPeakLeft + covPeakRight) / 2;\n\t int32_t footprint = spanPeak - covPeak;\n\t if (spanPeak < footprint) footprint = spanPeak;\n\t if (footprint < 0) cov[i] = 0;\n\t else if (footprint < (int32_t) maxCoverage) cov[i] = (TCount) footprint;\n\t else cov[i] = maxCoverage;\n\t }\n\t }\n\t}\n\n\t// Coverage track\n\tif (c.wiggleFormat) {\n\t if (c.normalize) {\n\t for(uint32_t i = 0; i < cov.size(); ++i) dataOut << normFactor * cov[i] << std::endl;\n\t } else {\n\t for(uint32_t i = 0; i < cov.size(); ++i) dataOut << cov[i] << std::endl;\n\t }\n\t} else if (c.format == \"raw\") {\n\t if (c.normalize) {\n\t for(uint32_t i = 0; i < cov.size(); ++i) dataOut << hdr->target_name[refIndex] << '\\t' << (i+1) << '\\t' << normFactor * cov[i] << std::endl;\n\t } else {\n\t for(uint32_t i = 0; i < cov.size(); ++i) dataOut << hdr->target_name[refIndex] << '\\t' << (i+1) << '\\t' << cov[i] << std::endl;\n\t }\n\t} else {\n\t typedef std::list<Track> TrackLine;\n\t TrackLine tl;\n\t uint32_t wb = 0;\n\t uint32_t we = 0;\n\t double wval = cov[0];\n\t for(uint32_t i = 1; i<cov.size(); ++i) {\n\t if (cov[i] == wval) ++we;\n\t else {\n\t tl.push_back(Track(wb, we+1, normFactor * wval));\n\t wb = i;\n\t we = i;\n\t wval = cov[i];\n\t }\n\t }\n\t tl.push_back(Track(wb, we+1, normFactor * wval));\n\t \n\t // Reduce file size\n\t if ((c.resolution > 0) && (c.resolution < 1)) {\n\t double red = 1;\n\t uint32_t origs = tl.size();\n\t while ((tl.size() > 1) && (red > c.resolution)) {\n\t TrackLine::iterator idx = tl.begin();\n\t TrackLine::iterator idxNext = tl.begin();\n\t ++idxNext;\n\t std::vector<double> errs;\n\t for(;idxNext != tl.end(); ++idx, ++idxNext) {\n\t\tuint32_t w1 = idx->end - idx->start;\n\t\tuint32_t w2 = idxNext->end - idxNext->start;\n\t\tdouble nwavg = (w1 * idx->score + w2 * idxNext->score) / (w1 + w2);\n\t\tdouble nerr = w1 * ((idx->score - nwavg) * (idx->score - nwavg));\n\t\tnerr += w2 * ((idxNext->score - nwavg) * (idxNext->score - nwavg));\n\t\terrs.push_back(nerr);\n\t }\n\t std::sort(errs.begin(), errs.end());\n\t uint32_t bpidx = (red - c.resolution) * tl.size();\n\t if (bpidx > 0) bpidx = bpidx - 1;\n\t double thres = errs[bpidx];\n\t idx = tl.begin();\n\t idxNext = tl.begin();\n\t ++idxNext;\n\t while(idxNext != tl.end()) {\n\t\tuint32_t w1 = idx->end - idx->start;\n\t\tuint32_t w2 = idxNext->end - idxNext->start;\n\t\tdouble nwavg = (w1 * idx->score + w2 * idxNext->score) / (w1 + w2);\n\t\tdouble nerr = w1 * ((idx->score - nwavg) * (idx->score - nwavg));\n\t\tnerr += w2 * ((idxNext->score - nwavg) * (idxNext->score - nwavg));\n\t\tif (nerr <= thres) {\n\t\t ++idxNext;\n\t\t uint32_t oldst = idx->start;\n\t\t tl.erase(idx++);\n\t\t idx->start = oldst;\n\t\t idx->score = nwavg;\n\t\t} else {\n\t\t ++idxNext;\n\t\t ++idx;\n\t\t}\n\t }\n\t red = (double) tl.size() / (double) origs;\n\t }\n\t }\n\t if (c.format == \"bedgraph\") {\n\t for(TrackLine::iterator idx 
= tl.begin(); idx != tl.end(); ++idx) dataOut << hdr->target_name[refIndex] << \"\\t\" << idx->start << \"\\t\" << idx->end << \"\\t\" << idx->score << std::endl;\n\t } else {\n\t for(TrackLine::iterator idx = tl.begin(); idx != tl.end(); ++idx) dataOut << hdr->target_name[refIndex] << \"\\t\" << idx->start << \"\\t\" << idx->end << \"\\t\" << hdr->target_name[refIndex] << \":\" << idx->start << \"-\" << idx->end << \"\\t\" << idx->score << std::endl;\n\t }\n\t}\n }\n }\n \n // clean-up\n bam_hdr_destroy(hdr);\n hts_idx_destroy(idx);\n sam_close(samfile);\n dataOut.pop();\n \n return 0;\n }\n\n\n int tracks(int argc, char **argv) {\n TrackConfig c;\n\n // Parameter\n boost::program_options::options_description generic(\"Generic options\");\n generic.add_options()\n (\"help,?\", \"show help message\")\n (\"map-qual,m\", boost::program_options::value<uint16_t>(&c.minQual)->default_value(10), \"min. mapping quality\")\n (\"normalize,n\", boost::program_options::value<uint32_t>(&c.normalize)->default_value(30000000), \"#pairs to normalize to (0: no normalization)\")\n (\"covtype,c\", boost::program_options::value<uint16_t>(&c.covtype)->default_value(0), \"coverage type (0: sequencing coverage, 1: spanning coverage, 2: footprinting)\")\n ;\n\n boost::program_options::options_description resolution(\"Resolution options (bedgraph/bed format)\");\n resolution.add_options()\n (\"resolution,r\", boost::program_options::value<float>(&c.resolution)->default_value(0.2), \"fractional resolution ]0,1]\")\n ; \n\n boost::program_options::options_description otp(\"Output options\");\n otp.add_options()\n (\"outfile,o\", boost::program_options::value<boost::filesystem::path>(&c.outfile)->default_value(\"track.gz\"), \"track file\")\n (\"format,f\", boost::program_options::value<std::string>(&c.format)->default_value(\"bedgraph\"), \"output format [bedgraph|bed|wiggle|raw]\")\n ;\n\n boost::program_options::options_description hidden(\"Hidden options\");\n hidden.add_options()\n (\"input-file\", boost::program_options::value<boost::filesystem::path>(&c.bamFile), \"input bam file\")\n ;\n\n boost::program_options::positional_options_description pos_args;\n pos_args.add(\"input-file\", -1);\n\n // Set the visibility\n boost::program_options::options_description cmdline_options;\n cmdline_options.add(generic).add(resolution).add(otp).add(hidden);\n boost::program_options::options_description visible_options;\n visible_options.add(generic).add(resolution).add(otp);\n\n // Parse command-line\n boost::program_options::variables_map vm;\n boost::program_options::store(boost::program_options::command_line_parser(argc, argv).options(cmdline_options).positional(pos_args).run(), vm);\n boost::program_options::notify(vm);\n\n // Check command line arguments\n if ((vm.count(\"help\")) || (!vm.count(\"input-file\"))) {\n std::cout << std::endl;\n std::cout << \"Usage: alfred \" << argv[0] << \" [OPTIONS] <aligned.bam>\" << std::endl;\n std::cout << visible_options << \"\\n\";\n return 1;\n }\n\n // Wiggle format at 1bp resolution\n c.wiggleFormat = false;\n if (vm.count(\"format\")) {\n if (c.format == \"wiggle\") {\n\tstd::cerr << \"Warning: Wiggle format triggers single-basepair resolution coverage track!\" << std::endl;\n\tc.wiggleFormat = true;\n }\n }\n\n // Coverage type\n if (c.covtype > 2) c.covtype = 0;\n\n // Check bam file\n if (!(boost::filesystem::exists(c.bamFile) && boost::filesystem::is_regular_file(c.bamFile) && boost::filesystem::file_size(c.bamFile))) {\n std::cerr << \"Alignment file is missing: \" << 
c.bamFile.string() << std::endl;\n return 1;\n } else {\n samFile* samfile = sam_open(c.bamFile.string().c_str(), \"r\");\n if (samfile == NULL) {\n\tstd::cerr << \"Fail to open file \" << c.bamFile.string() << std::endl;\n\treturn 1;\n }\n hts_idx_t* idx = sam_index_load(samfile, c.bamFile.string().c_str());\n if (idx == NULL) {\n\tif (bam_index_build(c.bamFile.string().c_str(), 0) != 0) {\n\t std::cerr << \"Fail to open index for \" << c.bamFile.string() << std::endl;\n\t return 1;\n\t}\n }\n bam_hdr_t* hdr = sam_hdr_read(samfile);\n if (hdr == NULL) {\n\tstd::cerr << \"Fail to open header for \" << c.bamFile.string() << std::endl;\n\treturn 1;\n }\n\n // Get sample name\n std::string sampleName;\n if (!getSMTag(std::string(hdr->text), c.bamFile.stem().string(), sampleName)) {\n\tstd::cerr << \"Only one sample (@RG:SM) is allowed per input BAM file \" << c.bamFile.string() << std::endl;\n\treturn 1;\n } else c.sampleName = sampleName;\n\n // Clean-up\n bam_hdr_destroy(hdr);\n hts_idx_destroy(idx);\n sam_close(samfile);\n }\n\n // Show cmd\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \";\n std::cout << \"alfred \";\n for(int i=0; i<argc; ++i) { std::cout << argv[i] << ' '; }\n std::cout << std::endl;\n\n return create_tracks(c);\n }\n\n \n}\n\n#endif\n" }, { "alpha_fraction": 0.6305524110794067, "alphanum_fraction": 0.6344532370567322, "avg_line_length": 59.20454406738281, "blob_id": "074f4daa759e76e263eccc9af4f0d9f32a1804d4", "content_id": "f77bdfcd8b78bda47929321acc46e8efee1cba86", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 8043, "license_type": "permissive", "max_line_length": 146, "num_lines": 132, "path": "/docs/webapp/README.md", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "# Web application\n\nAlfred's quality control JSON files can be interactively browsed with the\n[companion web application](https://www.gear-genomics.com/alfred).\nAll charts support panning and zooming and can be downloaded as PNG images.\nThe summary QC table can be downloaded as a CSV file.\n\nTo generate a quality control file in JSON format run [Alfred's command-line tool](/cli/) as follows:\n\n```bash\nalfred qc -r <ref.fa> -f json -o qc.json.gz <align.bam>\n```\n\nThe output file `qc.json.gz` can then be uploaded at\n[https://www.gear-genomics.com/alfred/](https://www.gear-genomics.com/alfred/).\n\n## Features\n\nAn overview of all available charts and the most important alignment statistics provided by Alfred is below.\n\n| Alignment Metric | DNA-Seq (WGS) | DNA-Seq (Capture) | RNA-Seq | ChIP-Seq/ATAC-Seq | Chart Type |\n| ------------------------------ | ------------- | ----------------- | ------- | ----------------- | ------------------ |\n| Mapping Statistics | ✔ | ✔ | ✔ | ✔ | Table |\n| Duplicate Statistics | ✔ | ✔ | ✔ | ✔ | Table |\n| Sequencing Error Rates | ✔ | ✔ | ✔ | ✔ | Table |\n| Base Content Distribution | ✔ | ✔ | ✔ | ✔ | Grouped Line Chart |\n| Read Length Distribution | ✔ | ✔ | ✔ | ✔ | Line Chart |\n| Base Quality Distribution | ✔ | ✔ | ✔ | ✔ | Line Chart |\n| Coverage Histogram | ✔ | ✔ | ✔ | ✔ | Line Chart |\n| Insert Size Distribution | ✔ | ✔ | ✔ | ✔ | Grouped Line Chart |\n| InDel Size Distribution | ✔ | ✔ | ✔ | ✔ | Grouped Line Chart |\n| InDel Context | ✔ | ✔ | ✔ | ✔ | Bar Chart |\n| GC Content | ✔ | ✔ | ✔ | ✔ | Grouped Line Chart |\n| On-Target Rate | | ✔ | | | Line Chart |\n| 
Target Coverage Distribution | | ✔ | | | Line Chart |\n| TSS Enrichment | | | | ✔ | Table |\n| DNA pitch / Nucleosome pattern | | | | ✔ | Grouped Line Chart |\n\n## Base content distribution\n\nThe base content distribution shows any base calling bias along the read.\nFor an ideal library the lines for A, C, G, and T should run in parallel.\nFor a whole-genome assay the GC-content of that genome should be reflected in the relative amounts of each base.\nSome libraries are expected to show a biased leading base distribution such as many RNA-Seq libraries\nbecause of random hexamer priming or restriction based assays.\n\n## Read length distribution\n\nIllumina sequencers produce reads of fixed read length but long read technologies usually have a median read length >1000bp\nand a long tail of reads with read lengths >30,000bp. This plot is also insightful to understand adapter trimming results\nor the removal of low quality bases at the start or end of a read.\n\n## Mean base quality distribution\n\nThis plot shows the mean base quality along the read. A typical Illumina profile shows base qualities >Q30\nbefore base 30 and then a gradual loss of base quality accuracy towards the end of the read.\n\n## Mapping quality distribution\n\nThis plot shows the mapping quality distribution for all mapped reads. The reported quality scores are aligner-dependent.\n\n## Coverage histogram\n\nThe coverage histogram shows how many bases of the sequenced genome are at a given coverage.\nPlease note that for targeted assays (capture assays) this plot is expected to show a large portion of the genome at coverage=0.\nFor targeted assays, we therefore recommend checking the on-target rate and the targets above coverage level plots.\n\n## On-target rate and targets above a given coverage level\n\nFor targeted assays, the two major concerns are capture efficiency (on-target rate)\nand how many of the targets are ready for downstream analysis\n(targets above a pre-defined coverage threshold).\nA standard whole-exome sequencing assay yields at least 70% of reads on-target\n(+/-200bp target extension) and at least 70% of targets >20x coverage.\n\n## Insert size histogram\n\nThe insert size plot shows the outer insert size distribution for all read pairs\nstratified by read pair orientation. There are different nomenclatures around for\ndefining the different paired-end layouts. The default Illumina paired-end layout is R+\n(or forward-reverse, FR), the default Illumina mate-pair layout is R- (or reverse-forward, RF).\nFor specific sequencing assays, the insert size distribution can serve as a key quality control metric.\nFor instance, ATAC-Seq libraries should show the characteristic nucleosome pattern and DNA pitch.\n\n## InDel size distribution\n\nHistogram of indel sizes collected from all mapped reads. This plot aggregates the length\nof all Cigar `I` and `D` operations.\n\n## InDel Homopolymer Context\n\nThe homopolymer plot shows for all InDels (Cigar I and D operations) if the preceding 3 bases are\nall A, all C, all G, or all T. If at least 2 different nucleotides are observed the reported\nhomopolymer context is \"None\". 
For Illumina reads, almost 50% of all reported InDels occur in a\nhomopolymer context with greater counts for A and T compared to G and C.\n\n## GC content\n\nTo estimate a GC bias curve even for low-coverage single-cell data, Alfred computes for each mapped read\nthe local GC-content and then compares the estimated sample GC content to the expected, genome-wide GC content.\nIf a targeted assay is analyzed, Alfred, in addition, computes the GC content of all target regions.\n\n## GC-Content and Mapping Statistics by Chromosome\n\nThis table lists the size, the number of Ns, the GC-content, and the number of mapped reads for each chromosome\nas well as the observed-to-expected ratio of mapped reads.\n\n## Summary statistics\n\nThe summary tab aggregates quality control data in a simple table that can be downloaded in CSV format.\nThis table is ideal to compare QC metrics across samples and/or sequencing assays. Among many other statistics,\nthe table lists, for instance, the number of duplicate reads, the number of unmapped reads, the number of\nsecondary and supplementary alignments, base-pair exact error rates stratified by mismatch,\ninsertion and deletion errors, and the median coverage and insert size of the sequenced sample.\nThe table provides more detailed statistics for specialized assays, i.e.\nfor 10X Genomics it lists the number of MI tagged reads, the total number of UMIs,\nthe fraction of haplotype-tagged reads and the N50 phased block length.\nFor ATAC-Seq data, users can provide a BED file of promoter regions and then the `EnrichmentOverBed` column\ncorresponds to TSS enrichment whereas for WES data, the enrichment quantifies the capturing efficiency\nif the BED file contains all target regions.\n\n## Example Data Sets\n\nThe [web application](https://www.gear-genomics.com/alfred) hosts example data sets for a number of sequencing assays and sequencing technologies.\n\n| Sequencing Assay | Sequencing Technology |\n| ----------------- | --------------------- |\n| DNA-Seq (WGS) | Illumina, PacBio, ONT |\n| DNA-Seq (Capture) | Illumina |\n| RNA-Seq | Illumina |\n| ATAC-Seq | Illumina |\n| ChIP-Seq | Illumina |\n" }, { "alpha_fraction": 0.6333100199699402, "alphanum_fraction": 0.658502459526062, "avg_line_length": 28.77083396911621, "blob_id": "ccbfb208f8578f9734918dd3a0a5de4f4236965f", "content_id": "6d9a169f7c612070c368210e5c84e5521bd56d9b", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1429, "license_type": "permissive", "max_line_length": 121, "num_lines": 48, "path": "/src/tenX.h", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#ifndef TENX_H\n#define TENX_H\n\n#include <boost/unordered_map.hpp>\n#include <boost/algorithm/string.hpp>\n#include <htslib/sam.h>\n\n\nnamespace bamstats\n{\n\n template<typename TGenomicBlockRanges>\n inline int32_t\n phasedBlocks(TGenomicBlockRanges const& brange) {\n int32_t count = 0;\n for(int32_t refIndex = 0; refIndex < (int32_t) brange.size(); ++refIndex) count += brange[refIndex].size();\n return count;\n }\n \n template<typename TGenomicBlockRanges>\n inline int32_t\n n50PhasedBlockLength(TGenomicBlockRanges const& brange) {\n typedef typename TGenomicBlockRanges::value_type TBlockRange;\n typedef std::vector<int32_t> TSizes;\n TSizes sz;\n int64_t totalSize = 0;\n for(int32_t refIndex = 0; refIndex < (int32_t) brange.size(); ++refIndex) {\n for(typename TBlockRange::const_iterator itBR = brange[refIndex].begin(); itBR != 
brange[refIndex].end(); ++itBR) {\n\tif (itBR->second.first < itBR->second.second) {\n\t sz.push_back(itBR->second.second - itBR->second.first);\n\t totalSize += (itBR->second.second - itBR->second.first);\n\t}\n\telse std::cerr << \"Warning: Phased block start after phased block end!\" << std::endl;\n }\n }\n std::sort(sz.begin(), sz.end(), std::greater<int32_t>());\n totalSize /= 2;\n int64_t cumSize = 0;\n for(int32_t i = 0; i < (int32_t) sz.size(); ++i) {\n cumSize += sz[i];\n if (cumSize > totalSize) return sz[i];\n }\n return 0;\n }\n\n}\n\n#endif\n" }, { "alpha_fraction": 0.6410399079322815, "alphanum_fraction": 0.6495203375816345, "avg_line_length": 37.05820083618164, "blob_id": "42f931dc712e28e55336f43ac01f8c8d6e21cfd4", "content_id": "beb79f7b2c93f55f83c75ec6922599b64e01b32a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 7193, "license_type": "permissive", "max_line_length": 195, "num_lines": 189, "path": "/src/gtf.h", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#ifndef GTF_H\n#define GTF_H\n\n#include <boost/filesystem.hpp>\n#include <boost/algorithm/string.hpp>\n#include <boost/algorithm/string.hpp>\n#include <boost/iostreams/filtering_streambuf.hpp>\n#include <boost/iostreams/filtering_stream.hpp>\n#include <boost/iostreams/copy.hpp>\n#include <boost/iostreams/filter/gzip.hpp>\n#include <boost/iostreams/device/file.hpp>\n#include <boost/algorithm/string.hpp>\n\n#include <htslib/sam.h>\n\n#include \"util.h\"\n\nnamespace bamstats\n{\n\n template<typename TConfig, typename TGenomicRegions, typename TGeneIds, typename TProteinCoding>\n inline int32_t\n parseGTFAll(TConfig const& c, TGenomicRegions& overlappingRegions, TGeneIds& geneIds, TProteinCoding& pCoding) {\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \" << \"GTF feature parsing\" << std::endl;\n\n // Check gzip\n if (!is_gz(c.gtfFile)) {\n std::cerr << \"GTF file is not gzipped!\" << std::endl;\n return 0;\n }\n\n // Map IDs to integer\n typedef std::map<std::string, int32_t> TIdMap;\n TIdMap idMap;\n\n // Keep track of unique exon IDs\n int32_t eid = 0;\n\n // Parse GTF\n std::ifstream file(c.gtfFile.string().c_str(), std::ios_base::in | std::ios_base::binary);\n boost::iostreams::filtering_streambuf<boost::iostreams::input> dataIn;\n dataIn.push(boost::iostreams::gzip_decompressor());\n dataIn.push(file);\n std::istream instream(&dataIn);\n std::string gline;\n while(std::getline(instream, gline)) {\n if ((gline.size()) && (gline[0] == '#')) continue;\n typedef boost::tokenizer< boost::char_separator<char> > Tokenizer;\n boost::char_separator<char> sep(\"\\t\");\n Tokenizer tokens(gline, sep);\n Tokenizer::iterator tokIter = tokens.begin();\n if (tokIter==tokens.end()) {\n\tstd::cerr << \"Empty line in GTF file!\" << std::endl;\n\treturn 0;\n }\n std::string chrName=*tokIter++;\n if (c.nchr.find(chrName) == c.nchr.end()) continue;\n int32_t chrid = c.nchr.find(chrName)->second; \n if (tokIter == tokens.end()) {\n\tstd::cerr << \"Corrupted GTF file!\" << std::endl;\n\treturn 0;\n }\n ++tokIter;\n if (tokIter == tokens.end()) {\n\tstd::cerr << \"Corrupted GTF file!\" << std::endl;\n\treturn 0;\n }\n std::string ft = *tokIter++;\n if (ft == c.feature) {\n\tif (tokIter != tokens.end()) {\n\t int32_t start = boost::lexical_cast<int32_t>(*tokIter++);\n\t int32_t end = boost::lexical_cast<int32_t>(*tokIter++);\n\t ++tokIter; 
// score\n\t if (tokIter == tokens.end()) {\n\t std::cerr << \"Corrupted GTF file!\" << std::endl;\n\t return 0;\n\t }\n\t char strand = boost::lexical_cast<char>(*tokIter++);\n\t ++tokIter; // frame\n\t std::string attr = *tokIter;\n\t boost::char_separator<char> sepAttr(\";\");\n\t Tokenizer attrTokens(attr, sepAttr);\n\t for(Tokenizer::iterator attrIter = attrTokens.begin(); attrIter != attrTokens.end(); ++attrIter) {\n\t std::string keyval = *attrIter;\n\t boost::trim(keyval);\n\t boost::char_separator<char> sepKeyVal(\" \");\n\t Tokenizer kvTokens(keyval, sepKeyVal);\n\t Tokenizer::iterator kvTokensIt = kvTokens.begin();\n\t std::string key = *kvTokensIt++;\n\t if (key == c.idname) {\n\t std::string val = *kvTokensIt;\n\t if (val.size() >= 3) val = val.substr(1, val.size()-2); // Trim off the bloody \"\n\t int32_t idval = geneIds.size();\n\t typename TIdMap::const_iterator idIter = idMap.find(val);\n\t if (idIter == idMap.end()) {\n\t\tidMap.insert(std::make_pair(val, idval));\n\t\tgeneIds.push_back(val);\n\t\t// Protein Coding?\n\t\tbool pCode = false;\n\t\tfor(Tokenizer::iterator arIter = attrTokens.begin(); arIter != attrTokens.end(); ++arIter) {\n\t\t std::string kvl = *arIter;\n\t\t boost::trim(kvl);\n\t\t boost::char_separator<char> sKV2(\" \");\n\t\t Tokenizer kvT2(kvl, sKV2);\n\t\t Tokenizer::iterator kvT2It = kvT2.begin();\n\t\t std::string procod = *kvT2It++;\n\t\t if (procod == \"gene_biotype\") {\n\t\t std::string gbio = *kvT2It;\n\t\t if (gbio.size() >= 3) gbio = gbio.substr(1, gbio.size()-2);\n\t\t if (gbio == \"protein_coding\") pCode = true;\n\t\t }\n\t\t}\n\t\tpCoding.push_back(pCode);\n\t } else idval = idIter->second;\n\t // Convert to 0-based and right-open\n\t if (start == 0) {\n\t\tstd::cerr << \"GTF is 1-based format!\" << std::endl;\n\t\treturn 0;\n\t }\n\t if (start > end) {\n\t\tstd::cerr << \"Feature start is greater than feature end!\" << std::endl;\n\t\treturn 0;\n\t }\n\t //std::cerr << geneIds[idval] << \"\\t\" << start << \"\\t\" << end << std::endl;\n\t _insertInterval(overlappingRegions[chrid], start - 1, end, strand, idval, eid++);\n\t }\n\t }\n\t}\n }\n }\n return geneIds.size();\n }\n\n\n template<typename TConfig, typename TGenomicRegions, typename TGeneIds>\n inline int32_t\n parseGTFAll(TConfig const& c, TGenomicRegions& overlappingRegions, TGeneIds& geneIds) {\n std::vector<bool> pCoding;\n return parseGTFAll(c, overlappingRegions, geneIds, pCoding);\n }\n \n template<typename TConfig, typename TGenomicRegions, typename TGeneIds, typename TProteinCoding>\n inline int32_t\n parseGTF(TConfig const& c, TGenomicRegions& gRegions, TGeneIds& geneIds, TProteinCoding& pCoding) {\n typedef typename TGenomicRegions::value_type TChromosomeRegions;\n\n // Overlapping intervals for each label\n TGenomicRegions overlappingRegions;\n overlappingRegions.resize(gRegions.size(), TChromosomeRegions());\n parseGTFAll(c, overlappingRegions, geneIds, pCoding);\n \n // Make intervals non-overlapping for each label\n for(uint32_t refIndex = 0; refIndex < overlappingRegions.size(); ++refIndex) {\n // Sort by ID\n std::sort(overlappingRegions[refIndex].begin(), overlappingRegions[refIndex].end(), SortIntervalLabel<IntervalLabel>());\n int32_t runningId = -1;\n char runningStrand = '*';\n typedef boost::icl::interval_set<uint32_t> TIdIntervals;\n typedef typename TIdIntervals::interval_type TIVal;\n TIdIntervals idIntervals;\n for(uint32_t i = 0; i < overlappingRegions[refIndex].size(); ++i) {\n\tif (overlappingRegions[refIndex][i].lid != runningId) {\n\t for(typename 
TIdIntervals::iterator it = idIntervals.begin(); it != idIntervals.end(); ++it) {\n\t //std::cerr << \"merged\\t\" << geneIds[runningId] << \"\\t\" << it->lower() << \"\\t\" << it->upper() << std::endl; \n\t gRegions[refIndex].push_back(IntervalLabel(it->lower(), it->upper(), runningStrand, runningId));\n\t }\n\t idIntervals.clear();\n\t runningId = overlappingRegions[refIndex][i].lid;\n\t runningStrand = overlappingRegions[refIndex][i].strand;\n\t}\n\tidIntervals.insert(TIVal::right_open(overlappingRegions[refIndex][i].start, overlappingRegions[refIndex][i].end));\n }\n // Process last id\n for(typename TIdIntervals::iterator it = idIntervals.begin(); it != idIntervals.end(); ++it) gRegions[refIndex].push_back(IntervalLabel(it->lower(), it->upper(), runningStrand, runningId));\n }\n \n return geneIds.size();\n }\n\n template<typename TConfig, typename TGenomicRegions, typename TGeneIds>\n inline int32_t\n parseGTF(TConfig const& c, TGenomicRegions& gRegions, TGeneIds& geneIds) {\n std::vector<bool> pCoding;\n return parseGTF(c, gRegions, geneIds, pCoding);\n }\n}\n\n#endif\n" }, { "alpha_fraction": 0.63624107837677, "alphanum_fraction": 0.641427218914032, "avg_line_length": 34.05818176269531, "blob_id": "d21cb61f4734b965fce078a4e8df18e25a75fd17", "content_id": "38695a565693f9bf3fc7f67e91e39a23f9907f3e", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 9641, "license_type": "permissive", "max_line_length": 151, "num_lines": 275, "path": "/src/qc.h", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#ifndef QC_H\n#define QC_H\n\n#include <iostream>\n#include <vector>\n#include <fstream>\n\n#include <boost/program_options/cmdline.hpp>\n#include <boost/program_options/options_description.hpp>\n#include <boost/program_options/parsers.hpp>\n#include <boost/program_options/variables_map.hpp>\n#include <boost/date_time/posix_time/posix_time.hpp>\n#include <boost/date_time/gregorian/gregorian.hpp>\n#include <boost/tokenizer.hpp>\n#include <boost/filesystem.hpp>\n#include <boost/progress.hpp>\n\n#include <htslib/sam.h>\n#include <htslib/faidx.h>\n\n#include \"bamstats.h\"\n#include \"util.h\"\n#include \"version.h\"\n\nnamespace bamstats\n{\n\nstruct ConfigQC {\n bool hasRegionFile;\n bool hasJsonOut;\n bool hasTsvOut;\n bool ignoreRG;\n bool singleRG;\n bool isHaplotagged;\n bool isMitagged;\n bool secondary;\n bool supplementary;\n float nXChrLen;\n uint32_t minChrLen;\n std::string rgname;\n std::string sampleName;\n boost::filesystem::path jsonout;\n boost::filesystem::path outfile;\n boost::filesystem::path genome;\n boost::filesystem::path regionFile;\n boost::filesystem::path bamFile;\n};\n\n\nint qc(int argc, char **argv) {\n ConfigQC c;\n c.isHaplotagged = false;\n c.isMitagged = false;\n c.minChrLen = 10000000;\n std::string sampleName;\n \n // Parameter\n boost::program_options::options_description generic(\"Generic options\");\n generic.add_options()\n (\"help,?\", \"show help message\")\n (\"reference,r\", boost::program_options::value<boost::filesystem::path>(&c.genome), \"reference fasta file (required)\")\n (\"bed,b\", boost::program_options::value<boost::filesystem::path>(&c.regionFile), \"bed file with target regions (optional)\")\n (\"name,a\", boost::program_options::value<std::string>(&sampleName), \"sample name (optional, otherwise SM tag is used)\")\n (\"jsonout,j\", boost::program_options::value<boost::filesystem::path>(&c.jsonout), \"gzipped json output file\")\n (\"outfile,o\", 
boost::program_options::value<boost::filesystem::path>(&c.outfile), \"gzipped tsv output file\")\n (\"secondary,s\", \"evaluate secondary alignments\")\n (\"supplementary,u\", \"evaluate supplementary alignments\") \n ;\n\n boost::program_options::options_description rgopt(\"Read-group options\");\n rgopt.add_options()\n (\"rg,g\", boost::program_options::value<std::string>(&c.rgname), \"only analyze this read group (optional)\")\n (\"ignore,i\", \"ignore read-groups\")\n ;\n\n boost::program_options::options_description hidden(\"Hidden options\");\n hidden.add_options()\n (\"nxchrlen,n\", boost::program_options::value<float>(&c.nXChrLen)->default_value(0.95), \"N95 chromosome length to trim mapping table [0,1]\")\n (\"input-file\", boost::program_options::value<boost::filesystem::path>(&c.bamFile), \"input bam file\")\n ;\n\n boost::program_options::positional_options_description pos_args;\n pos_args.add(\"input-file\", -1);\n\n boost::program_options::options_description cmdline_options;\n cmdline_options.add(generic).add(rgopt).add(hidden);\n boost::program_options::options_description visible_options;\n visible_options.add(generic).add(rgopt);\n\n // Parse command-line\n boost::program_options::variables_map vm;\n boost::program_options::store(boost::program_options::command_line_parser(argc, argv).options(cmdline_options).positional(pos_args).run(), vm);\n boost::program_options::notify(vm);\n\n // Check command line arguments\n if ((vm.count(\"help\")) || ((!vm.count(\"outfile\")) && (!vm.count(\"jsonout\"))) || (!vm.count(\"input-file\")) || (!vm.count(\"reference\"))) {\n std::cout << std::endl;\n std::cout << \"Usage: alfred \" << argv[0] << \" [OPTIONS] -r <ref.fa> -j <qc.json.gz> <aligned.bam>\" << std::endl;\n std::cout << visible_options << \"\\n\";\n return 1;\n }\n\n // Tsv output file\n if (vm.count(\"outfile\")) c.hasTsvOut = true;\n else c.hasTsvOut = false;\n\n // Json output file\n if (vm.count(\"jsonout\")) c.hasJsonOut = true;\n else c.hasJsonOut = false;\n\n // Secondary alignments\n if (vm.count(\"secondary\")) c.secondary = true;\n else c.secondary = false;\n\n // Supplementary alignments\n if (vm.count(\"supplementary\")) c.supplementary = true;\n else c.supplementary = false;\n \n // Check N95\n if (c.nXChrLen > 1) c.nXChrLen = 1;\n else if (c.nXChrLen < 0) c.nXChrLen = 0;\n\n // Check genome\n if (!(boost::filesystem::exists(c.genome) && boost::filesystem::is_regular_file(c.genome) && boost::filesystem::file_size(c.genome))) {\n std::cerr << \"Input reference file is missing: \" << c.genome.string() << std::endl;\n return 1;\n } else {\n faidx_t* fai = fai_load(c.genome.string().c_str());\n if (fai == NULL) {\n if (fai_build(c.genome.string().c_str()) == -1) {\n\tstd::cerr << \"Fail to open genome fai index for \" << c.genome.string() << std::endl;\n\treturn 1;\n } else fai = fai_load(c.genome.string().c_str());\n }\n fai_destroy(fai);\n }\n\n // Check bam file\n if (!(boost::filesystem::exists(c.bamFile) && boost::filesystem::is_regular_file(c.bamFile) && boost::filesystem::file_size(c.bamFile))) {\n std::cerr << \"Alignment file is missing: \" << c.bamFile.string() << std::endl;\n return 1;\n }\n samFile* samfile = sam_open(c.bamFile.string().c_str(), \"r\");\n if (samfile == NULL) {\n std::cerr << \"Fail to open file \" << c.bamFile.string() << std::endl;\n return 1;\n }\n hts_idx_t* idx = sam_index_load(samfile, c.bamFile.string().c_str());\n if (idx == NULL) {\n if (bam_index_build(c.bamFile.string().c_str(), 0) != 0) {\n std::cerr << \"Fail to open index 
for \" << c.bamFile.string() << std::endl;\n return 1;\n }\n }\n bam_hdr_t* hdr = sam_hdr_read(samfile);\n faidx_t* fai = fai_load(c.genome.string().c_str());\n for(int32_t refIndex=0; refIndex < hdr->n_targets; ++refIndex) {\n std::string tname(hdr->target_name[refIndex]);\n if (!faidx_has_seq(fai, tname.c_str())) {\n std::cerr << \"BAM file chromosome \" << hdr->target_name[refIndex] << \" is NOT present in your reference file \" << c.genome.string() << std::endl;\n return 1;\n }\n }\n fai_destroy(fai);\n if (!vm.count(\"name\")) {\n if (!getSMTag(std::string(hdr->text), c.bamFile.stem().string(), sampleName)) {\n std::cerr << \"Only one sample (@RG:SM) is allowed per input BAM file \" << c.bamFile.string() << std::endl;\n return 1;\n } else c.sampleName = sampleName;\n } else {\n if (sampleName.size()) c.sampleName = sampleName;\n else {\n std::cerr << \"Empty sample name is not allowed!\" << std::endl;\n return 1;\n }\n }\n bam_hdr_destroy(hdr);\n hts_idx_destroy(idx);\n sam_close(samfile);\n\n // Ignore read groups\n c.ignoreRG = true;\n c.singleRG = false;\n if (!vm.count(\"ignore\")) {\n // Single RG\n if (vm.count(\"rg\")) {\n c.ignoreRG = false;\n c.singleRG = true;\n } else {\n // Check number of RGs\n int32_t rgc = countRGs(c);\n if (rgc > 3) {\n\tstd::cerr << \"Warning: BAM file has more than 3 RGs. Please run read-groups separately!\" << std::endl;\n\tstd::cerr << \"Warning: Program continues but ignores read-groups.\" << std::endl;\n } else {\n\tc.ignoreRG = false;\n\tc.singleRG = false;\n }\n }\n }\n\n \n // Check region file\n if (vm.count(\"bed\")) {\n if (!(boost::filesystem::exists(c.regionFile) && boost::filesystem::is_regular_file(c.regionFile) && boost::filesystem::file_size(c.regionFile))) {\n std::cerr << \"Input region file in bed format is missing: \" << c.regionFile.string() << std::endl;\n return 1;\n }\n std::string oldChr;\n faidx_t* fai = fai_load(c.genome.string().c_str());\n if (is_gz(c.regionFile)) {\n std::ifstream file(c.regionFile.string().c_str(), std::ios_base::in | std::ios_base::binary);\n boost::iostreams::filtering_streambuf<boost::iostreams::input> dataIn;\n dataIn.push(boost::iostreams::gzip_decompressor());\n dataIn.push(file);\n std::istream instream(&dataIn);\n std::string intervalLine;\n while(std::getline(instream, intervalLine)) {\n\ttypedef boost::tokenizer< boost::char_separator<char> > Tokenizer;\n\tboost::char_separator<char> sep(\" \\t,;\");\n\tTokenizer tokens(intervalLine, sep);\n\tTokenizer::iterator tokIter = tokens.begin();\n\tif (tokIter!=tokens.end()) {\n\t std::string chrName=*tokIter++;\n\t if (chrName.compare(oldChr) != 0) {\n\t oldChr = chrName;\n\t if (!faidx_has_seq(fai, chrName.c_str())) {\n\t std::cerr << \"Chromosome from bed file \" << chrName << \" is NOT present in your reference file \" << c.genome.string() << std::endl;\n\t return 1;\n\t }\n\t }\n\t}\n }\n dataIn.pop();\n } else {\n std::ifstream interval_file(c.regionFile.string().c_str(), std::ifstream::in);\n if (interval_file.is_open()) {\n\twhile (interval_file.good()) {\n\t std::string intervalLine;\n\t getline(interval_file, intervalLine);\n\t typedef boost::tokenizer< boost::char_separator<char> > Tokenizer;\n\t boost::char_separator<char> sep(\" \\t,;\");\n\t Tokenizer tokens(intervalLine, sep);\n\t Tokenizer::iterator tokIter = tokens.begin();\n\t if (tokIter!=tokens.end()) {\n\t std::string chrName=*tokIter++;\n\t if (chrName.compare(oldChr) != 0) {\n\t oldChr = chrName;\n\t if (!faidx_has_seq(fai, chrName.c_str())) {\n\t\tstd::cerr << \"Chromosome from 
bed file \" << chrName << \" is NOT present in your reference file \" << c.genome.string() << std::endl;\n\t\treturn 1;\n\t }\n\t }\n\t }\n\t}\n\tinterval_file.close();\n }\n }\n fai_destroy(fai);\n c.hasRegionFile = true;\n } else c.hasRegionFile = false;\n\n // Show cmd\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \";\n std::cout << \"alfred \";\n for(int i=0; i<argc; ++i) { std::cout << argv[i] << ' '; }\n std::cout << std::endl;\n\n return bamStatsRun(c); \n}\n\n}\n\n#endif\n" }, { "alpha_fraction": 0.5806368589401245, "alphanum_fraction": 0.6047549843788147, "avg_line_length": 41.061370849609375, "blob_id": "7bdee09365ba35a336e0e039fe6cc3c44313e868", "content_id": "83302c47c0d894ae616975c8ba37b396a47b578e", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 11651, "license_type": "permissive", "max_line_length": 165, "num_lines": 277, "path": "/scripts/stats.R", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "library(ggplot2)\nlibrary(scales)\n\n# Output pdf\nargs=commandArgs(trailingOnly=TRUE)\nif (length(args) > 1) { pdffile = args[2]; } else { pdffile = paste0(args[1], \".pdf\"); }\npdf(pdffile)\n\nprint(\"Base Content\")\ncmd=paste0('zgrep ^BC ', args[1], ' | cut -f 2-')\nall=read.table(pipe(cmd), header=T)\nfor(sid in unique(all$Sample)) {\n\tfor(rg in unique(all[all$Sample == sid,]$Library)) {\n\t base = all[all$Sample == sid & all$Library == rg,]\n\t p1=ggplot(data=base, aes(x=Position, y=Fraction))\n\t p1=p1 + geom_line(aes(group=Base, colour=Base))\n\t p1=p1 + xlab(\"Position in read\") + ylab(\"Base Content Fraction\")\n\t p1=p1 + scale_y_continuous(labels=comma, limits=c(0, max(base$Fraction)))\n\t p1=p1 + ggtitle(paste0(\"Base Content Distribution\", \"\\n\", \"Sample: \", sid, \"\\n\", \"RG: \", rg))\n\t p1=p1 + scale_x_continuous(labels=comma)\n\t p1=p1 + facet_wrap(~ Read)\n\t p1=p1 + theme(legend.position=\"bottom\", legend.direction='horizontal')\n print(p1)\n\t}\n}\nprint(warnings())\t\n\nprint(\"Base Qualities\")\ncmd=paste0('zgrep ^BQ ', args[1], ' | cut -f 2-')\nall=read.table(pipe(cmd), header=T)\nfor(sid in unique(all$Sample)) {\n\tfor(rg in unique(all[all$Sample == sid,]$Library)) {\t\n\t bq = all[all$Sample == sid & all$Library == rg,]\n\t p1=ggplot(data=bq, aes(x=Position, y=BaseQual))\n \t p1=p1 + geom_line(aes(color=Read, group=Read))\n \t p1=p1 + xlab(\"Position in read\") + ylab(\"Mean Base Quality\")\n \t p1=p1 + scale_y_continuous(labels=comma, limits=c(0, max(bq$BaseQual)))\n\t p1=p1 + ggtitle(paste0(\"Base Quality Distribution\", \"\\n\", \"Sample: \", sid, \"\\n\", \"RG: \", rg))\n \t p1=p1 + scale_x_continuous(labels=comma)\n \t p1=p1 + theme(legend.position=\"bottom\", legend.direction='horizontal')\n \t print(p1)\n\t}\n}\nprint(warnings())\n\nprint(\"Read length\")\ncmd=paste0('zgrep ^RL ', args[1], ' | cut -f 2-')\nall=read.table(pipe(cmd), header=T)\nfor(sid in unique(all$Sample)) {\n\tfor(rg in unique(all[all$Sample == sid,]$Library)) {\n\t rl = all[all$Sample == sid & all$Library == rg,]\n\t tc=sum(as.numeric(rl$Count))\n\t upBound=60000\n\t gr=sum(as.numeric(rl[rl$Readlength>upBound,]$Count))\n\t infoMax = paste0(\"Read Length > \", upBound, \" (\", round(100 * gr / tc, digits=2), \"%)\")\n\t rl=rl[rl$Readlength <= upBound,]\n\t p1=ggplot(data=rl, aes(x=Readlength, y=Fraction))\n\t p1=p1 + geom_bar(aes(fill=Read, group=Read), stat=\"identity\", 
position=\"dodge\")\n\t p1=p1 + xlab(\"Read length\") + ylab(\"Fraction of reads\")\n\t p1=p1 + scale_y_continuous(labels=comma)\n\t if (gr) { p1=p1 + ggtitle(paste0(\"Read Length Distribution\", \"\\n\", infoMax, \"\\n\", \"Sample: \", sid, \"\\n\", \"RG: \", rg)); }\n\t else { p1=p1 + ggtitle(paste0(\"Read Length Distribution\", \"\\n\", \"Sample: \", sid, \"\\n\", \"RG: \", rg)); }\n\t p1=p1 + scale_x_continuous(labels=comma)\n\t p1=p1 + theme(legend.position=\"bottom\", legend.direction='horizontal')\n\t print(p1)\n\t}\n}\nprint(warnings())\n\nprint(\"Mapping quality\")\ncmd=paste0('zgrep ^MQ ', args[1], ' | cut -f 2-')\nall=read.table(pipe(cmd), header=T)\nfor(sid in unique(all$Sample)) {\n\tfor(rg in unique(all[all$Sample == sid,]$Library)) {\n\t mq = all[all$Sample == sid & all$Library == rg,]\n\t p1=ggplot(data=mq, aes(x=MappingQuality, y=Fraction))\n\t p1=p1 + geom_bar(fill=\"darkblue\", stat=\"identity\", position=\"dodge\")\n\t p1=p1 + xlab(\"Mapping Quality\") + ylab(\"Fraction of reads\")\n\t p1=p1 + scale_y_continuous(labels=comma)\n\t p1=p1 + ggtitle(paste0(\"Mapping Quality Distribution\", \"\\n\", \"Sample: \", sid, \"\\n\", \"RG: \", rg))\n\t p1=p1 + scale_x_continuous(labels=comma)\n\t p1=p1 + theme(legend.position=\"bottom\", legend.direction='horizontal')\n\t print(p1)\n\t}\n}\nprint(warnings())\n\nprint(\"Coverage\")\ncmd=paste0('zgrep ^CO ', args[1], ' | cut -f 2-')\nall=read.table(pipe(cmd), header=T)\nfor(sid in unique(all$Sample)) {\n for(rg in unique(all[all$Sample == sid,]$Library)) {\n\tcov = all[all$Sample == sid & all$Library == rg,]\n\ttc=sum(as.numeric(cov$Count))\n\tif (tc > 0) {\n\t upBound=max(cov[cov$Quantile >= 0.001 & cov$Quantile <= 0.999,]$Coverage)\n\t gr=sum(as.numeric(cov[cov$Coverage>upBound,]$Count))\n\t infoMax = paste0(\"Coverage > \", upBound, \" (\", round(100 * gr / tc, digits=2), \"%)\")\n\t cov=cov[cov$Coverage <= upBound,]\n\t p1=ggplot(data=cov, aes(x=Coverage, y=Count))\n\t p1=p1 + geom_line()\n\t p1=p1 + scale_y_continuous(labels=comma)\n\t p1=p1 + ggtitle(paste0(\"Coverage Distribution\", \"\\n\", infoMax, \"\\n\", \"Sample: \", sid, \"\\n\", \"RG: \", rg))\n\t p1=p1 + scale_x_continuous(labels=comma)\n\t p1=p1 + theme(legend.position=\"bottom\", legend.direction='horizontal')\n\t print(p1)\n\t}\n }\n}\nprint(warnings())\n\nprint(\"Insert Size\")\ncmd=paste0('zgrep ^IS ', args[1], ' | cut -f 2-')\nall=read.table(pipe(cmd), header=T)\nfor(sid in unique(all$Sample)) {\n for(rg in unique(all[all$Sample == sid,]$Library)) {\n ins = all[all$Sample == sid & all$Library == rg,]\n tc=sum(as.numeric(ins$Count))\n if (tc > 0) {\n \tupBound=max(ins[ins$Quantile >= 0.001 & ins$Quantile <= 0.999,]$InsertSize)\n\tgr=sum(as.numeric(ins[ins$InsertSize>upBound,]$Count))\n\tinfoMax = paste0(\"Insert size > \", upBound, \" (\", round(100 * gr / tc, digits=2), \"%)\")\n\tins=ins[ins$InsertSize <= upBound,]\n\tp1=ggplot(data=ins, aes(x=InsertSize, y=Count))\n\tp1=p1 + geom_line(aes(group=Layout, colour=Layout))\n\tp1=p1 + scale_y_continuous(labels=comma)\n\tp1=p1 + ggtitle(paste0(\"Insert Size Distribution\", \"\\n\", infoMax, \"\\n\", \"Sample: \", sid, \"\\n\", \"RG: \", rg))\n\tp1=p1 + scale_x_continuous(labels=comma)\n\tp1=p1 + theme(axis.text.x = element_text(angle=45, hjust=1))\n\tprint(p1)\n }\n }\n}\nprint(warnings())\n\nprint(\"Homopolymer InDels\")\ncmd=paste0('zgrep ^IC ', args[1], ' | cut -f 2-')\nall=read.table(pipe(cmd), header=T)\nfor(sid in unique(all$Sample)) {\n\tfor(rg in unique(all[all$Sample == sid,]$Library)) {\t\n\t ic = 
all[all$Sample == sid & all$Library == rg,]\n\t ic$Homopolymer = factor(ic$Homopolymer, levels=c(\"A\", \"C\", \"G\", \"T\", \"N\", \"None\"))\n\t p1=ggplot(data=ic, aes(x=Homopolymer, y=Fraction))\n \t p1=p1 + geom_bar(aes(group=InDel, fill=InDel), stat=\"identity\", position=\"dodge\")\n \t p1=p1 + xlab(\"Homopolymer Context\") + ylab(\"InDel Fraction\")\n \t p1=p1 + scale_y_continuous(labels=comma)\n\t p1=p1 + ggtitle(paste0(\"InDel Homopolymer Context\", \"\\n\", \"Sample: \", sid, \"\\n\", \"RG: \", rg))\n \t p1=p1 + theme(legend.position=\"bottom\", legend.direction='horizontal')\n \t print(p1)\n\t}\n}\nprint(warnings())\n\nprint(\"InDel Size\")\ncmd=paste0('zgrep ^IZ ', args[1], ' | cut -f 2-')\nall=read.table(pipe(cmd), header=T)\nfor(sid in unique(all$Sample)) {\n\tfor(rg in unique(all[all$Sample == sid,]$Library)) {\t\n\t iz = all[all$Sample == sid & all$Library == rg,]\n\t p1=ggplot(data=iz, aes(x=Size, y=Count))\n \t p1=p1 + geom_line(aes(group=InDel, color=InDel))\n \t p1=p1 + xlab(\"InDel Size\") + ylab(\"InDel Count\")\n \t p1=p1 + scale_y_continuous(labels=comma)\n \t p1=p1 + scale_x_continuous(labels=comma)\n\t p1=p1 + ggtitle(paste0(\"InDel Size\", \"\\n\", \"Sample: \", sid, \"\\n\", \"RG: \", rg))\n \t p1=p1 + theme(legend.position=\"bottom\", legend.direction='horizontal')\n \t print(p1)\n\t}\n}\nprint(warnings())\n\nprint(\"GC Content\")\ncmd=paste0('zgrep ^GC ', args[1], ' | cut -f 2-')\nall=read.table(pipe(cmd), header=T)\nif (1) {\n p1=ggplot(data=all, aes(x=GCcontent, y=fractionOfReads))\n p1=p1 + geom_line(aes(group=Library, color=Library))\n p1=p1 + xlab(\"GC content\") + ylab(\"Fraction of reads or reference windows\")\n p1=p1 + scale_y_continuous(labels=comma)\n p1=p1 + ggtitle(\"GC-Content Distribution\")\n p1=p1 + scale_x_continuous(labels=comma)\n p1=p1 + theme(legend.position=\"bottom\", legend.direction='horizontal')\n print(p1)\n}\nprint(warnings())\n\n# BED file\ncmd=paste0('zgrep ^OT ', args[1], ' | cut -f 2-')\nall=tryCatch(read.table(pipe(cmd), header=T), error=function(e) NULL)\nif (!is.null(all)) {\n print(\"On-target rate\");\n for(sid in unique(all$Sample)) {\n for(rg in unique(all[all$Sample == sid,]$Library)) {\n \tot = all[all$Sample == sid & all$Library == rg,]\n\tot$Extension=as.numeric(ot$Extension)\n\tot$OnTarget=as.numeric(ot$OnTarget)\n\tp1=ggplot(data=ot, aes(x=Extension, y=OnTarget))\n\tp1=p1 + geom_line()\n\tp1=p1 + scale_y_continuous(labels=comma, limits=c(0,1))\n\tp1=p1 + ggtitle(paste0(\"On-target rate\", \"\\n\", \"Sample: \", sid, \"\\n\", \"RG: \", rg))\n\tp1=p1 + scale_x_continuous(labels=comma) + xlab(\"Left/Right Extension of target region\") + ylab(\"Fraction of reads on-target\")\n\tp1=p1 + facet_wrap(~ Sample) + theme(legend.position=\"bottom\", legend.direction='horizontal')\n\tprint(p1)\n }\n }\n print(warnings())\n}\ncmd=paste0('zgrep ^TC ', args[1], ' | cut -f 2-')\nall=tryCatch(read.table(pipe(cmd), header=T), error=function(e) NULL)\nif (!is.null(all)) {\n print(\"Target coverage\");\n for(sid in unique(all$Sample)) {\n for(rg in unique(all[all$Sample == sid,]$Library)) {\n \tx = all[all$Sample == sid & all$Library == rg,]\n\ts = data.frame()\n\tsl=unique(x[,c(\"Sample\", \"Library\")])\n\tfor(i in 1:nrow(sl)) {\n\t slc=x[(x$Sample == sl[i,\"Sample\"]) & (x$Library == sl[i,\"Library\"]),]\n\t for (j in 0:max(x$AvgCov)) { s=rbind(s, cbind(Sample=as.character(sl[i,\"Sample\"]), Library=as.character(sl[i,\"Library\"]), frac=mean(x$AvgCov >= j), cov=j)); }\n\t}\n \ts$cov = as.numeric(as.character(s$cov))\n \ts$frac = 
as.numeric(as.character(s$frac))\n\n \t# Remove trailing small values\n \ttc=sum(as.numeric(s$frac))\n \tsc=sort(s$frac)\n \tlowcut=sc[cumsum(sc) >= 0.01 * tc][1]\n \tsl=unique(s[,c(\"Sample\", \"Library\")])\n \tsfilt = data.frame()\n \tfor(i in 1:nrow(sl)) {\n \t slc=s[(s$Sample == sl[i,\"Sample\"]) & (s$Library == sl[i,\"Library\"]),]\n \t sfilt=rbind(sfilt, slc[which(slc$frac>=lowcut)[1]:tail(which(slc$frac>=lowcut), n=1),])\n \t}\n \tp1=ggplot(data=sfilt, aes(x=cov, y=frac))\n \tp1=p1 + geom_line()\n \tp1=p1 + scale_y_continuous(labels=comma)\n\tp1=p1 + ggtitle(paste0(\"Target coverage distribution\", \"\\n\", \"Sample: \", sid, \"\\n\", \"RG: \", rg))\n \tp1=p1 + scale_x_continuous(labels=comma) + xlab(\"Coverage\") + ylab(\"Fraction of targets above coverage level\")\n \tp1=p1 + theme(legend.position=\"bottom\", legend.direction='horizontal')\n \tprint(p1)\n }\n }\n print(warnings())\t\n}\n\ncmd=paste0('zgrep ^PS ', args[1], ' | cut -f 2-')\nall=tryCatch(read.table(pipe(cmd), header=T), error=function(e) NULL)\nif (!is.null(all)) {\n print(\"Phased block length\");\n for(sid in unique(all$Sample)) {\n for(rg in unique(all[all$Sample == sid,]$Library)) {\n \tx = all[all$Sample == sid & all$Library == rg,]\n \tp1=ggplot(data=x, aes(x=Size))\n \tp1=p1 + geom_histogram(bins=50)\n \tp1=p1 + scale_y_continuous(labels=comma)\n\tp1=p1 + ggtitle(paste0(\"Phased block length distribution\", \"\\n\", \"Sample: \", sid, \"\\n\", \"RG: \", rg))\n \tp1=p1 + scale_x_continuous(labels=comma) + xlab(\"Phased block size\") + ylab(\"Count\")\n \tprint(p1)\n }\n }\n print(warnings())\n}\n\n\nprint(\"Metrics\")\ncmd=paste0('zgrep ^ME ', args[1], ' | cut -f 2-')\nall=read.table(pipe(cmd), header=F, comment.char=\"$\")\nall=as.data.frame(t(all))\nnc=ncol(all) + 1\nme=data.frame(x=nc, y=1:nrow(all))\np1=ggplot(data=me, aes(x=x, y=y)) + geom_blank() + xlim(0, nc) + theme(line=element_blank(), text=element_blank(), title=element_blank())\nfor(i in 1:nrow(all)) { for(j in 1:ncol(all)) { p1=p1 + annotate(\"text\", x=j, y=nrow(all)-i, label=all[i, j], size=2); } }\nprint(p1)\nprint(warnings())\n\n# Close output file\ndev.off()\n" }, { "alpha_fraction": 0.5820827484130859, "alphanum_fraction": 0.5962369441986084, "avg_line_length": 36.91379165649414, "blob_id": "df52a92d70e82395c820cfb4e15ee281a8df71e9", "content_id": "84993fd83466233fa94e42176a14d694eb47d423", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 17592, "license_type": "permissive", "max_line_length": 371, "num_lines": 464, "path": "/src/split.h", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#ifndef SPLIT_H\n#define SPLIT_H\n\n#include <iostream>\n#include <vector>\n#include <fstream>\n\n#include <boost/program_options/cmdline.hpp>\n#include <boost/program_options/options_description.hpp>\n#include <boost/program_options/parsers.hpp>\n#include <boost/program_options/variables_map.hpp>\n#include <boost/date_time/posix_time/posix_time.hpp>\n#include <boost/date_time/gregorian/gregorian.hpp>\n#include <boost/iostreams/device/file.hpp>\n#include <boost/iostreams/stream.hpp>\n#include <boost/iostreams/stream_buffer.hpp>\n#include <boost/iostreams/filtering_stream.hpp>\n#include <boost/iostreams/filter/zlib.hpp>\n#include <boost/iostreams/filter/gzip.hpp>\n#include <boost/random.hpp>\n#include <boost/generator_iterator.hpp>\n#include <boost/tuple/tuple.hpp>\n#include <boost/filesystem.hpp>\n#include <boost/progress.hpp>\n\n#include <htslib/faidx.h>\n#include 
<htslib/sam.h>\n#include <htslib/vcf.h>\n\n#include \"util.h\"\n#include \"variants.h\"\n\nnamespace bamstats\n{\n\n struct SplitConfig {\n bool assign;\n bool interleaved;\n unsigned short minMapQual;\n std::string sample;\n boost::filesystem::path genome;\n boost::filesystem::path h1bam;\n boost::filesystem::path h2bam;\n boost::filesystem::path bamfile;\n boost::filesystem::path vcffile;\n };\n\n template<typename TConfig>\n inline int32_t\n phaseBamRun(TConfig const& c) {\n\n#ifdef PROFILE\n ProfilerStart(\"alfred.prof\");\n#endif\n\n // Load bam files\n samFile* samfile = sam_open(c.bamfile.string().c_str(), \"r\");\n hts_set_fai_filename(samfile, c.genome.string().c_str());\n hts_idx_t* idx = sam_index_load(samfile, c.bamfile.string().c_str());\n bam_hdr_t* hdr = sam_hdr_read(samfile);\n\n // Load bcf file\n htsFile* ibcffile = bcf_open(c.vcffile.string().c_str(), \"r\");\n hts_idx_t* bcfidx = bcf_index_load(c.vcffile.string().c_str());\n bcf_hdr_t* bcfhdr = bcf_hdr_read(ibcffile);\n\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \" << \"Assign reads to haplotypes\" << std::endl;\n boost::progress_display show_progress(hdr->n_targets);\n\n // Open output file\n samFile* h1bam = sam_open(c.h1bam.string().c_str(), \"wb\");\n if (sam_hdr_write(h1bam, hdr) != 0) {\n std::cerr << \"Could not write ouptut file header!\" << std::endl;\n return -1;\n }\n\n samFile* h2bam = NULL;\n if (!c.interleaved) {\n h2bam = sam_open(c.h2bam.string().c_str(), \"wb\");\n if (sam_hdr_write(h2bam, hdr) != 0) {\n\tstd::cerr << \"Could not write ouptut file header!\" << std::endl;\n\treturn -1;\n }\n }\n\n // Assign reads to SNPs\n uint32_t assignedReadsH1 = 0;\n uint32_t assignedReadsH2 = 0;\n uint32_t unassignedReads = 0;\n uint32_t ambiguousReads = 0;\n uint64_t assignedBasesH1 = 0;\n uint64_t assignedBasesH2 = 0;\n uint64_t unassignedBases = 0;\n uint64_t ambiguousBases = 0;\n faidx_t* fai = fai_load(c.genome.string().c_str());\n for (int refIndex = 0; refIndex<hdr->n_targets; ++refIndex) {\n std::string chrName(hdr->target_name[refIndex]);\n ++show_progress;\n\n // Load het. 
markers\n typedef std::vector<BiallelicVariant> TPhasedVariants;\n TPhasedVariants pv;\n if (!_loadVariants(ibcffile, bcfidx, bcfhdr, c.sample, chrName, pv)) continue;\n if (pv.empty()) continue;\n \n // Sort variants\n std::sort(pv.begin(), pv.end(), SortVariants<BiallelicVariant>());\n\n // Load reference\n int32_t seqlen = -1;\n char* seq = NULL;\n seq = faidx_fetch_seq(fai, chrName.c_str(), 0, hdr->target_len[refIndex], &seqlen); \n \n // Assign reads to haplotypes\n std::set<std::size_t> h1;\n std::set<std::size_t> h2;\n hts_itr_t* iter = sam_itr_queryi(idx, refIndex, 0, hdr->target_len[refIndex]);\n bam1_t* rec = bam_init1();\n while (sam_itr_next(samfile, iter, rec) >= 0) {\n\tif (rec->core.flag & (BAM_FSECONDARY | BAM_FQCFAIL | BAM_FDUP | BAM_FSUPPLEMENTARY | BAM_FUNMAP)) continue;\n\tif ((rec->core.qual < c.minMapQual) || (rec->core.tid<0)) continue;\n\tif ((rec->core.flag & BAM_FPAIRED) && (rec->core.flag & BAM_FMUNMAP)) continue;\n\tuint32_t hp1votes = 0;\n\tuint32_t hp2votes = 0;\n\tTPhasedVariants::const_iterator vIt = std::lower_bound(pv.begin(), pv.end(), BiallelicVariant(rec->core.pos), SortVariants<BiallelicVariant>());\n\tTPhasedVariants::const_iterator vItEnd = std::upper_bound(pv.begin(), pv.end(), BiallelicVariant(lastAlignedPosition(rec)), SortVariants<BiallelicVariant>());\n\tif (vIt != vItEnd) {\n\t // Get read sequence\n\t std::string sequence;\n\t sequence.resize(rec->core.l_qseq);\n\t uint8_t* seqptr = bam_get_seq(rec);\n\t for (int32_t i = 0; i < rec->core.l_qseq; ++i) sequence[i] = \"=ACMGRSVTWYHKDBN\"[bam_seqi(seqptr, i)];\n\t \n\t // Parse CIGAR\n\t uint32_t* cigar = bam_get_cigar(rec);\n\t for(;vIt != vItEnd; ++vIt) {\n\t int32_t gp = rec->core.pos; // Genomic position\n\t int32_t sp = 0; // Sequence position\n\t bool varFound = false;\n\t for (std::size_t i = 0; ((i < rec->core.n_cigar) && (!varFound)); ++i) {\n\t if (bam_cigar_op(cigar[i]) == BAM_CSOFT_CLIP) sp += bam_cigar_oplen(cigar[i]);\n\t else if (bam_cigar_op(cigar[i]) == BAM_CINS) sp += bam_cigar_oplen(cigar[i]);\n\t else if (bam_cigar_op(cigar[i]) == BAM_CSOFT_CLIP) sp += bam_cigar_oplen(cigar[i]);\n\t else if (bam_cigar_op(cigar[i]) == BAM_CDEL) gp += bam_cigar_oplen(cigar[i]);\n\t else if (bam_cigar_op(cigar[i]) == BAM_CREF_SKIP) gp += bam_cigar_oplen(cigar[i]);\n\t else if (bam_cigar_op(cigar[i]) == BAM_CHARD_CLIP) {\n\t\t//Nop\n\t } else if ((bam_cigar_op(cigar[i]) == BAM_CMATCH) || (bam_cigar_op(cigar[i]) == BAM_CEQUAL) || (bam_cigar_op(cigar[i]) == BAM_CDIFF)) {\n\t\tif (gp + (int32_t) bam_cigar_oplen(cigar[i]) < vIt->pos) {\n\t\t gp += bam_cigar_oplen(cigar[i]);\n\t\t sp += bam_cigar_oplen(cigar[i]);\n\t\t} else {\n\t\t for(std::size_t k = 0; k<bam_cigar_oplen(cigar[i]); ++k, ++sp, ++gp) {\n\t\t if (gp == vIt->pos) {\n\t\t varFound = true;\n\t\t // Check REF allele\n\t\t if (vIt->ref == std::string(seq + gp, seq + gp + vIt->ref.size())) {\n\t\t\t// Check ALT allele\n\t\t\tif ((sp + vIt->alt.size() < sequence.size()) && (sp + vIt->ref.size() < sequence.size())) {\n\t\t\t if (vIt->ref.size() == vIt->alt.size()) {\n\t\t\t // SNP\n\t\t\t if ((sequence.substr(sp, vIt->alt.size()) == vIt->alt) && (sequence.substr(sp, vIt->ref.size()) != vIt->ref)) {\n\t\t\t // ALT supporting read\n\t\t\t if (vIt->hap) ++hp1votes;\n\t\t\t else ++hp2votes;\n\t\t\t } else if ((sequence.substr(sp, vIt->alt.size()) != vIt->alt) && (sequence.substr(sp, vIt->ref.size()) == vIt->ref)) {\n\t\t\t // REF supporting read\n\t\t\t if (vIt->hap) ++hp2votes;\n\t\t\t else ++hp1votes;\n\t\t\t }\n\t\t\t } else if (vIt->ref.size() < 
vIt->alt.size()) {\n\t\t\t // Insertion\n\t\t\t int32_t diff = vIt->alt.size() - vIt->ref.size();\n\t\t\t std::string refProbe = vIt->ref + std::string(seq + gp + vIt->ref.size(), seq + gp + vIt->ref.size() + diff);\n\t\t\t if ((sequence.substr(sp, vIt->alt.size()) == vIt->alt) && (sequence.substr(sp, vIt->alt.size()) != refProbe)) {\n\t\t\t // ALT supporting read\n\t\t\t if (vIt->hap) ++hp1votes;\n\t\t\t else ++hp2votes;\n\t\t\t } else if ((sequence.substr(sp, vIt->alt.size()) != vIt->alt) && (sequence.substr(sp, vIt->alt.size()) == refProbe)) {\n\t\t\t // REF supporting read\n\t\t\t if (vIt->hap) ++hp2votes;\n\t\t\t else ++hp1votes;\n\t\t\t }\n\t\t\t } else {\n\t\t\t // Deletion\n\t\t\t int32_t diff = vIt->ref.size() - vIt->alt.size();\n\t\t\t std::string altProbe = vIt->alt + std::string(seq + gp + vIt->ref.size(), seq + gp + vIt->ref.size() + diff);\n\t\t\t if ((sequence.substr(sp, vIt->ref.size()) == altProbe) && (sequence.substr(sp, vIt->ref.size()) != vIt->ref)) {\n\t\t\t // ALT supporting read\n\t\t\t if (vIt->hap) ++hp1votes;\n\t\t\t else ++hp2votes;\n\t\t\t } else if ((sequence.substr(sp, vIt->ref.size()) != altProbe) && (sequence.substr(sp, vIt->ref.size()) == vIt->ref)) {\n\t\t\t // REF supporting read\n\t\t\t if (vIt->hap) ++hp2votes;\n\t\t\t else ++hp1votes;\n\t\t\t }\n\t\t\t }\n\t\t\t}\n\t\t }\n\t\t }\n\t\t }\n\t\t}\n\t } else {\n\t\tstd::cerr << \"Unknown Cigar options\" << std::endl;\n\t\treturn 1;\n\t }\n\t }\n\t }\n\t int32_t hp = 0;\n\t if (hp1votes > 2*hp2votes) hp = 1;\n\t else if (hp2votes > 2*hp1votes) hp = 2;\n\t if (hp) {\n\t if (hp == 1) {\n\t if (rec->core.flag & BAM_FREAD1) h1.insert(hash_pair(rec));\n\t else h1.insert(hash_pair_mate(rec));\n\t } else {\n\t if (rec->core.flag & BAM_FREAD1) h2.insert(hash_pair(rec));\n\t else h2.insert(hash_pair_mate(rec));\n\t }\n\t }\n\t}\n }\n bam_destroy1(rec);\n hts_itr_destroy(iter);\n\n // Random number generator\n typedef boost::mt19937 RNGType;\n RNGType rng;\n boost::uniform_int<> one_or_two(1,2);\n boost::variate_generator< RNGType, boost::uniform_int<> > dice(rng, one_or_two);\n \n // Fetch all pairs\n hts_itr_t* itr = sam_itr_queryi(idx, refIndex, 0, hdr->target_len[refIndex]);\n bam1_t* r = bam_init1();\n while (sam_itr_next(samfile, itr, r) >= 0) {\n\tint32_t slen = r->core.l_qseq;\n\tbool h1Found = false;\n\tbool h2Found = false;\n\tif (r->core.flag & BAM_FREAD1) {\n\t if (h1.find(hash_pair(r)) != h1.end()) h1Found = true;\n\t if (h2.find(hash_pair(r)) != h2.end()) h2Found = true;\n\t} else {\n\t if (h1.find(hash_pair_mate(r)) != h1.end()) h1Found = true;\n\t if (h2.find(hash_pair_mate(r)) != h2.end()) h2Found = true;\n\t}\n\tif ((h1Found) && (h2Found)) {\n\t // Inconsistent haplotype assignment for this pair\n\t //std::cout << \"Read\\t\" << bam_get_qname(r) << \"\\t\" << hdr->target_name[r->core.tid] << \"\\t\" << r->core.pos << \"\\t\" << hdr->target_name[r->core.mtid] << \"\\t\" << r->core.mpos << std::endl;\n\t ++ambiguousReads;\n\t ambiguousBases += slen;\n\t} else if ((!h1Found) && (!h2Found)) {\n\t ++unassignedReads;\n\t unassignedBases += slen;\n\t if (c.assign) {\n\t // Random assignment\n\t int32_t hrnd = dice();\n\t if (hrnd == 1) {\n\t bam_aux_append(r, \"HP\", 'i', 4, (uint8_t*)&hrnd);\n\t if (!sam_write1(h1bam, hdr, r)) {\n\t\tstd::cerr << \"Could not write to bam file!\" << std::endl;\n\t\treturn -1;\n\t }\n\t } else {\n\t bam_aux_append(r, \"HP\", 'i', 4, (uint8_t*)&hrnd);\n\t if (!c.interleaved) {\n\t\tif (!sam_write1(h2bam, hdr, r)) {\n\t\t std::cerr << \"Could not write to bam file!\" << 
std::endl;\n\t\t return -1;\n\t\t}\n\t } else {\n\t\tif (!sam_write1(h1bam, hdr, r)) {\n\t\t std::cerr << \"Could not write to bam file!\" << std::endl;\n\t\t return -1;\n\t\t}\n\t }\n\t }\n\t }\n\t} else if ((h1Found) && (!h2Found)) {\n\t int32_t hp = 1;\n\t ++assignedReadsH1;\n\t assignedBasesH1 += slen;\n\t bam_aux_append(r, \"HP\", 'i', 4, (uint8_t*)&hp);\n\t if (!sam_write1(h1bam, hdr, r)) {\n\t std::cerr << \"Could not write to bam file!\" << std::endl;\n\t return -1;\n\t }\n\t} else if ((!h1Found) && (h2Found)) {\n\t int32_t hp = 2;\n\t ++assignedReadsH2;\n\t assignedBasesH2 += slen;\n\t bam_aux_append(r, \"HP\", 'i', 4, (uint8_t*)&hp);\n\t if (!c.interleaved) {\n\t if (!sam_write1(h2bam, hdr, r)) {\n\t std::cerr << \"Could not write to bam file!\" << std::endl;\n\t return -1;\n\t }\n\t } else {\n\t if (!sam_write1(h1bam, hdr, r)) {\n\t std::cerr << \"Could not write to bam file!\" << std::endl;\n\t return -1;\n\t }\n\t }\n\t}\n }\n bam_destroy1(r);\n hts_itr_destroy(itr);\n if (seq != NULL) free(seq);\n }\n fai_destroy(fai);\n \n // Close bam\n bam_hdr_destroy(hdr);\n hts_idx_destroy(idx);\n sam_close(samfile);\n \n // Close output BAMs\n sam_close(h1bam);\n bam_index_build(c.h1bam.string().c_str(), 0);\n if (!c.interleaved) {\n sam_close(h2bam);\n bam_index_build(c.h2bam.string().c_str(), 0);\n }\n \n // Close BCF\n bcf_hdr_destroy(bcfhdr);\n hts_idx_destroy(bcfidx);\n bcf_close(ibcffile);\n \n // End\n now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] Done.\" << std::endl;\n\n // Statistics\n uint64_t sumReads = assignedReadsH1 + assignedReadsH2 + unassignedReads + ambiguousReads;\n std::cout << \"AssignedReadsH1=\" << assignedReadsH1 << \", AssignedReadsH2=\" << assignedReadsH2 << \", UnassignedReads=\" << unassignedReads << \", AmbiguousReads=\" << ambiguousReads << \", FractionReadsAssigned=\" << (float) (assignedReadsH1 + assignedReadsH2) / (float) sumReads << \", FractionAmbiguousReads=\" << (float) (ambiguousReads) / (float) (sumReads) << std::endl;\n uint64_t sumBases = assignedBasesH1 + assignedBasesH2 + unassignedBases + ambiguousBases;\n std::cout << \"AssignedBasesH1=\" << assignedBasesH1 << \", AssignedBasesH2=\" << assignedBasesH2 << \", UnassignedBases=\" << unassignedBases << \", AmbiguousBases=\" << ambiguousBases << \", FractionBasesAssigned=\" << (float) (assignedBasesH1 + assignedBasesH2) / (float) sumBases << \", FractionAmbiguousBases=\" << (float) (ambiguousBases) / (float) (sumBases) << std::endl;\n\n#ifdef PROFILE\n ProfilerStop();\n#endif\n \n return 0;\n }\n\n int split(int argc, char **argv) {\n SplitConfig c;\n\n // Parameter\n boost::program_options::options_description generic(\"Generic options\");\n generic.add_options()\n (\"help,?\", \"show help message\")\n (\"map-qual,m\", boost::program_options::value<unsigned short>(&c.minMapQual)->default_value(10), \"min. 
mapping quality\")\n (\"reference,r\", boost::program_options::value<boost::filesystem::path>(&c.genome), \"reference fasta file\")\n (\"hap1,p\", boost::program_options::value<boost::filesystem::path>(&c.h1bam)->default_value(\"h1.bam\"), \"haplotype1 output file\")\n (\"hap2,q\", boost::program_options::value<boost::filesystem::path>(&c.h2bam)->default_value(\"h2.bam\"), \"haplotype2 output file\")\n (\"sample,s\", boost::program_options::value<std::string>(&c.sample)->default_value(\"NA12878\"), \"sample name (as in BCF)\")\n (\"vcffile,v\", boost::program_options::value<boost::filesystem::path>(&c.vcffile), \"input phased VCF/BCF file\")\n (\"assign,a\", \"assign unphased reads randomly\")\n (\"interleaved,i\", \"single haplotype-tagged BAM\")\n ;\n\n boost::program_options::options_description hidden(\"Hidden options\");\n hidden.add_options()\n (\"input-file\", boost::program_options::value<boost::filesystem::path>(&c.bamfile), \"input bam file\")\n ;\n\n boost::program_options::positional_options_description pos_args;\n pos_args.add(\"input-file\", -1);\n\n boost::program_options::options_description cmdline_options;\n cmdline_options.add(generic).add(hidden);\n boost::program_options::options_description visible_options;\n visible_options.add(generic);\n boost::program_options::variables_map vm;\n boost::program_options::store(boost::program_options::command_line_parser(argc, argv).options(cmdline_options).positional(pos_args).run(), vm);\n boost::program_options::notify(vm);\n\n \n // Check command line arguments\n if ((vm.count(\"help\")) || (!vm.count(\"input-file\")) || (!vm.count(\"reference\")) || (!vm.count(\"vcffile\"))) {\n std::cout << std::endl;\n std::cout << \"Usage: alfred \" << argv[0] << \" [OPTIONS] -r <ref.fa> -s NA12878 -v <snps.bcf> <unphased.bam>\" << std::endl;\n std::cout << visible_options << \"\\n\";\n return 1;\n }\n\n\n // Assign unphased reads randomly?\n if (!vm.count(\"assign\")) c.assign = false;\n else c.assign = true;\n\n // single BAM\n if (!vm.count(\"interleaved\")) c.interleaved = false;\n else c.interleaved = true;\n\n // Check input BAM file\n if (vm.count(\"input-file\")) {\n if (!(boost::filesystem::exists(c.bamfile) && boost::filesystem::is_regular_file(c.bamfile) && boost::filesystem::file_size(c.bamfile))) {\n\tstd::cerr << \"Input BAM file is missing: \" << c.bamfile.string() << std::endl;\n\treturn 1;\n }\n samFile* samfile = sam_open(c.bamfile.string().c_str(), \"r\");\n if (samfile == NULL) {\n\tstd::cerr << \"Fail to open file \" << c.bamfile.string() << std::endl;\n\treturn 1;\n }\n hts_idx_t* idx = sam_index_load(samfile, c.bamfile.string().c_str());\n if (idx == NULL) {\n\tstd::cerr << \"Fail to open index for \" << c.bamfile.string() << std::endl;\n\treturn 1;\n }\n bam_hdr_t* hdr = sam_hdr_read(samfile);\n if (hdr == NULL) {\n\tstd::cerr << \"Fail to open header for \" << c.bamfile.string() << std::endl;\n\treturn 1;\n }\n bam_hdr_destroy(hdr);\n hts_idx_destroy(idx);\n sam_close(samfile);\n }\n \n // Check VCF/BCF file\n if (vm.count(\"vcffile\")) {\n if (!(boost::filesystem::exists(c.vcffile) && boost::filesystem::is_regular_file(c.vcffile) && boost::filesystem::file_size(c.vcffile))) {\n\tstd::cerr << \"Input VCF/BCF file is missing: \" << c.vcffile.string() << std::endl;\n\treturn 1;\n }\n htsFile* ifile = bcf_open(c.vcffile.string().c_str(), \"r\");\n if (ifile == NULL) {\n\tstd::cerr << \"Fail to open file \" << c.vcffile.string() << std::endl;\n\treturn 1;\n }\n hts_idx_t* bcfidx = 
bcf_index_load(c.vcffile.string().c_str());\n if (bcfidx == NULL) {\n\tstd::cerr << \"Fail to open index file for \" << c.vcffile.string() << std::endl;\n\treturn 1;\n }\n bcf_hdr_t* hdr = bcf_hdr_read(ifile);\n if (hdr == NULL) {\n\tstd::cerr << \"Fail to open header for \" << c.vcffile.string() << std::endl;\n\treturn 1;\n }\n bcf_hdr_destroy(hdr);\n hts_idx_destroy(bcfidx);\n bcf_close(ifile);\n }\n\n // Show cmd\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \";\n std::cout << \"alfred \";\n for(int i=0; i<argc; ++i) { std::cout << argv[i] << ' '; }\n std::cout << std::endl;\n \n return phaseBamRun(c);\n}\n\n\n\n}\n\n#endif\n" }, { "alpha_fraction": 0.6534446477890015, "alphanum_fraction": 0.6701461672782898, "avg_line_length": 28.9375, "blob_id": "f2eafe7bbd8eb468b5408742c9188eeac71c0717", "content_id": "a277b68279e34721afa63ba305bdc9e87dbe0539", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 479, "license_type": "permissive", "max_line_length": 79, "num_lines": 16, "path": "/maps/exon.R", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "library(GenomicFeatures)\n\nexonTable = function(id) {\n\t db=makeTxDbFromUCSC(genome=id, tablename=\"ccdsGene\")\n\t ex=reduce(exons(db), ignore.strand=T)\n\t ex=keepStandardChromosomes(ex)\n\t ex=ex[width(ex)>1,]\n\t df=data.frame(chr=seqnames(ex), start=start(ex), end=end(ex), type=\"exonic\")\n\t gz = gzfile(paste0(\"exonic.\", id, \".bed.gz\"), \"w\")\n\t write.table(df, gz, quote=F, row.names=F, col.names=F, sep=\"\\t\")\n\t close(gz)\n}\n\nexonTable(\"mm10\")\nexonTable(\"hg19\")\nexonTable(\"hg38\")\n" }, { "alpha_fraction": 0.5856375694274902, "alphanum_fraction": 0.5923489928245544, "avg_line_length": 38.00523376464844, "blob_id": "5e281d068691d4ebbb40a1c50d3e23016001cb88", "content_id": "47a4858bdeb29ad080c47d04b61e3648330c4193", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 14900, "license_type": "permissive", "max_line_length": 220, "num_lines": 382, "path": "/src/ase.h", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#ifndef ASE_H\n#define ASE_H\n\n#include <iostream>\n#include <vector>\n#include <fstream>\n\n#include <boost/math/distributions/binomial.hpp>\n#include <boost/program_options/cmdline.hpp>\n#include <boost/program_options/options_description.hpp>\n#include <boost/program_options/parsers.hpp>\n#include <boost/program_options/variables_map.hpp>\n#include <boost/date_time/posix_time/posix_time.hpp>\n#include <boost/date_time/gregorian/gregorian.hpp>\n#include <boost/iostreams/device/file.hpp>\n#include <boost/iostreams/stream.hpp>\n#include <boost/iostreams/stream_buffer.hpp>\n#include <boost/iostreams/filtering_stream.hpp>\n#include <boost/iostreams/filter/zlib.hpp>\n#include <boost/iostreams/filter/gzip.hpp>\n#include <boost/random.hpp>\n#include <boost/generator_iterator.hpp>\n#include <boost/tuple/tuple.hpp>\n#include <boost/filesystem.hpp>\n#include <boost/progress.hpp>\n#include <htslib/faidx.h>\n#include <htslib/sam.h>\n#include <htslib/vcf.h>\n\n#include \"util.h\"\n#include \"variants.h\"\n\nnamespace bamstats {\n\n\n struct AseConfig {\n bool isPhased;\n bool outputAll;\n unsigned short minMapQual;\n unsigned short minBaseQual;\n std::string sample;\n boost::filesystem::path as;\n boost::filesystem::path genome;\n 
boost::filesystem::path bamfile;\n boost::filesystem::path vcffile;\n };\n\n template<typename TConfig>\n inline int32_t\n aseRun(TConfig& c) {\n\n#ifdef PROFILE\n ProfilerStart(\"alfred.prof\");\n#endif\n \n // Load bam files\n samFile* samfile = sam_open(c.bamfile.string().c_str(), \"r\");\n hts_set_fai_filename(samfile, c.genome.string().c_str());\n hts_idx_t* idx = sam_index_load(samfile, c.bamfile.string().c_str());\n bam_hdr_t* hdr = sam_hdr_read(samfile);\n\n // Load bcf file\n htsFile* ibcffile = bcf_open(c.vcffile.string().c_str(), \"r\");\n hts_idx_t* bcfidx = bcf_index_load(c.vcffile.string().c_str());\n bcf_hdr_t* bcfhdr = bcf_hdr_read(ibcffile);\n \n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \" << \"Assign reads to haplotypes\" << std::endl;\n boost::progress_display show_progress(hdr->n_targets);\n\n // Allele support file\n boost::iostreams::filtering_ostream dataOut;\n dataOut.push(boost::iostreams::gzip_compressor());\n dataOut.push(boost::iostreams::file_sink(c.as.string().c_str(), std::ios_base::out | std::ios_base::binary));\n dataOut << \"chr\\tpos\\tid\\tref\\talt\\tdepth\\trefsupport\\taltsupport\\tgt\\taf\\tpvalue\" << std::endl;\n \n // Assign reads to SNPs\n faidx_t* fai = fai_load(c.genome.string().c_str());\n for (int refIndex = 0; refIndex<hdr->n_targets; ++refIndex) {\n std::string chrName(hdr->target_name[refIndex]);\n ++show_progress;\n\n // Load het. markers\n typedef std::vector<BiallelicVariant> TPhasedVariants;\n TPhasedVariants pv;\n if (!_loadVariants(ibcffile, bcfidx, bcfhdr, c.sample, chrName, pv)) continue;\n if (pv.empty()) continue;\n\n // Sort variants\n std::sort(pv.begin(), pv.end(), SortVariants<BiallelicVariant>());\n\n // Load reference\n int32_t seqlen = -1;\n char* seq = NULL;\n seq = faidx_fetch_seq(fai, chrName.c_str(), 0, hdr->target_len[refIndex], &seqlen);\n \n // Annotate REF and ALT support\n typedef std::vector<uint32_t> TAlleleSupport;\n TAlleleSupport ref(pv.size(), 0);\n TAlleleSupport alt(pv.size(), 0);\n hts_itr_t* itr = sam_itr_queryi(idx, refIndex, 0, hdr->target_len[refIndex]);\n bam1_t* r = bam_init1();\n while (sam_itr_next(samfile, itr, r) >= 0) {\n\tif (r->core.flag & (BAM_FSECONDARY | BAM_FQCFAIL | BAM_FDUP | BAM_FSUPPLEMENTARY | BAM_FUNMAP)) continue;\n\tif ((r->core.qual < c.minMapQual) || (r->core.tid<0)) continue;\n\tif ((r->core.flag & BAM_FPAIRED) && (r->core.flag & BAM_FMUNMAP)) continue;\n\n\t// Fetch contained variants\n\tTPhasedVariants::const_iterator vIt = std::lower_bound(pv.begin(), pv.end(), BiallelicVariant(r->core.pos), SortVariants<BiallelicVariant>());\n\tTPhasedVariants::const_iterator vItEnd = std::upper_bound(pv.begin(), pv.end(), BiallelicVariant(lastAlignedPosition(r)), SortVariants<BiallelicVariant>());\n\tif (vIt != vItEnd) {\n\t // Get read sequence\n\t std::string sequence;\n\t sequence.resize(r->core.l_qseq);\n\t uint8_t* seqptr = bam_get_seq(r);\n\t for (int32_t i = 0; i < r->core.l_qseq; ++i) sequence[i] = \"=ACMGRSVTWYHKDBN\"[bam_seqi(seqptr, i)];\n\n\t // Get base qualities\n\t typedef std::vector<uint8_t> TQuality;\n\t TQuality quality;\n\t quality.resize(r->core.l_qseq);\n\t uint8_t* qualptr = bam_get_qual(r);\n\t for (int i = 0; i < r->core.l_qseq; ++i) quality[i] = qualptr[i];\n\t \n\t // Parse CIGAR\n\t uint32_t* cigar = bam_get_cigar(r);\n\t for(;vIt != vItEnd; ++vIt) {\n\t int32_t gp = r->core.pos; // Genomic position\n\t int32_t sp = 0; // Sequence position\n\t bool varFound 
= false;\n\t for (std::size_t i = 0; ((i < r->core.n_cigar) && (!varFound)); ++i) {\n\t if (bam_cigar_op(cigar[i]) == BAM_CSOFT_CLIP) sp += bam_cigar_oplen(cigar[i]);\n\t else if (bam_cigar_op(cigar[i]) == BAM_CINS) sp += bam_cigar_oplen(cigar[i]);\n\t else if (bam_cigar_op(cigar[i]) == BAM_CSOFT_CLIP) sp += bam_cigar_oplen(cigar[i]);\n\t else if (bam_cigar_op(cigar[i]) == BAM_CDEL) gp += bam_cigar_oplen(cigar[i]);\n\t else if (bam_cigar_op(cigar[i]) == BAM_CREF_SKIP) gp += bam_cigar_oplen(cigar[i]);\n\t else if (bam_cigar_op(cigar[i]) == BAM_CHARD_CLIP) {\n\t\t//Nop\n\t } else if ((bam_cigar_op(cigar[i]) == BAM_CMATCH) || (bam_cigar_op(cigar[i]) == BAM_CEQUAL) || (bam_cigar_op(cigar[i]) == BAM_CDIFF)) {\n\t\tif (gp + (int32_t) bam_cigar_oplen(cigar[i]) < vIt->pos) {\n\t\t gp += bam_cigar_oplen(cigar[i]);\n\t\t sp += bam_cigar_oplen(cigar[i]);\n\t\t} else {\n\t\t for(std::size_t k = 0; k<bam_cigar_oplen(cigar[i]); ++k, ++sp, ++gp) {\n\t\t if (gp == vIt->pos) {\n\t\t varFound = true;\n\t\t if (quality[sp] >= c.minBaseQual) {\n\t\t\t// Check REF allele\n\t\t\tif (vIt->ref == std::string(seq + gp, seq + gp + vIt->ref.size())) {\n\t\t\t // Check ALT allele\n\t\t\t if ((sp + vIt->alt.size() < sequence.size()) && (sp + vIt->ref.size() < sequence.size())) {\n\t\t\t if (vIt->ref.size() == vIt->alt.size()) {\n\t\t\t // SNP\n\t\t\t if ((sequence.substr(sp, vIt->alt.size()) == vIt->alt) && (sequence.substr(sp, vIt->ref.size()) != vIt->ref)) {\n\t\t\t\t++alt[vIt-pv.begin()];\n\t\t\t } else if ((sequence.substr(sp, vIt->alt.size()) != vIt->alt) && (sequence.substr(sp, vIt->ref.size()) == vIt->ref)) {\n\t\t\t\t++ref[vIt-pv.begin()];\n\t\t\t }\n\t\t\t }\n\t\t\t } else if (vIt->ref.size() < vIt->alt.size()) {\n\t\t\t // Insertion\n\t\t\t int32_t diff = vIt->alt.size() - vIt->ref.size();\n\t\t\t std::string refProbe = vIt->ref + std::string(seq + gp + vIt->ref.size(), seq + gp + vIt->ref.size() + diff);\n\t\t\t if ((sequence.substr(sp, vIt->alt.size()) == vIt->alt) && (sequence.substr(sp, vIt->alt.size()) != refProbe)) {\n\t\t\t ++alt[vIt-pv.begin()];\n\t\t\t } else if ((sequence.substr(sp, vIt->alt.size()) != vIt->alt) && (sequence.substr(sp, vIt->alt.size()) == refProbe)) {\n\t\t\t ++ref[vIt-pv.begin()];\n\t\t\t }\n\t\t\t } else {\n\t\t\t // Deletion\n\t\t\t int32_t diff = vIt->ref.size() - vIt->alt.size();\n\t\t\t std::string altProbe = vIt->alt + std::string(seq + gp + vIt->ref.size(), seq + gp + vIt->ref.size() + diff);\n\t\t\t if ((sequence.substr(sp, vIt->ref.size()) == altProbe) && (sequence.substr(sp, vIt->ref.size()) != vIt->ref)) {\n\t\t\t ++alt[vIt-pv.begin()];\n\t\t\t } else if ((sequence.substr(sp, vIt->ref.size()) != altProbe) && (sequence.substr(sp, vIt->ref.size()) == vIt->ref)) {\n\t\t\t ++ref[vIt-pv.begin()];\n\t\t\t }\n\t\t\t }\n\t\t\t}\n\t\t }\n\t\t }\n\t\t }\n\t\t}\n\t }\n\t else {\n\t\tstd::cerr << \"Unknown Cigar options\" << std::endl;\n\t\treturn 1;\n\t }\n\t }\n\t }\n\t}\n }\n bam_destroy1(r);\n hts_itr_destroy(itr);\n if (seqlen) free(seq);\n\n // Output (phased) allele support\n hts_itr_t* itervcf = bcf_itr_querys(bcfidx, bcfhdr, chrName.c_str());\n if (itervcf != NULL) {\n\tbcf1_t* recvcf = bcf_init1();\n\tfor (uint32_t i = 0; i<pv.size(); ++i) {\n\t // Fetch variant annotation from VCF\n\t int32_t itrRet = 0;\n\t do {\n\t itrRet = bcf_itr_next(ibcffile, itervcf, recvcf);\n\t if (itrRet >= 0) {\n\t bcf_unpack(recvcf, BCF_UN_SHR);\n\t std::vector<std::string> alleles;\n\t for(std::size_t k = 0; k<recvcf->n_allele; ++k) alleles.push_back(std::string(recvcf->d.allele[k]));\n\t if 
((recvcf->pos == pv[i].pos) && (pv[i].ref == alleles[0]) && (pv[i].alt == alleles[1])) break;\n\t } else {\n\t std::cerr << \"Error: Variant not found! \" << chrName << \":\" << (pv[i].pos + 1) << std::endl;\n\t return 1;\n\t }\n\t } while (itrRet >= 0);\n\t uint32_t totalcov = ref[i] + alt[i];\n\t std::string hapstr = \"0/1\";\n\t if (c.isPhased) {\n\t if (pv[i].hap) hapstr = \"1|0\";\n\t else hapstr = \"0|1\";\n\t }\n\t if (totalcov > 0) {\n\t double h1af = 0;\n\t double vaf = (double) alt[i] / (double) totalcov;\n\t if (pv[i].hap) h1af = (double) alt[i] / (double) totalcov;\n\t else h1af = (double) ref[i] / (double) totalcov;\n\t double pval = binomTest(alt[i], totalcov, 0.5);\n\t dataOut << chrName << \"\\t\" << (pv[i].pos + 1) << \"\\t\" << recvcf->d.id << \"\\t\" << pv[i].ref << \"\\t\" << pv[i].alt << \"\\t\" << totalcov << \"\\t\" << ref[i] << \"\\t\" << alt[i] << \"\\t\" << hapstr << \"\\t\";\n\t if (c.isPhased) dataOut << h1af << \"\\t\";\n\t else dataOut << vaf << \"\\t\";\n\t dataOut << pval << std::endl;\n\t } else {\n\t if (c.outputAll) {\n\t // No coverage\n\t dataOut << chrName << \"\\t\" << (pv[i].pos + 1) << \"\\t\" << recvcf->d.id << \"\\t\" << pv[i].ref << \"\\t\" << pv[i].alt << \"\\t\" << totalcov << \"\\t\" << ref[i] << \"\\t\" << alt[i] << \"\\t\" << hapstr << \"\\tNA\\tNA\" << std::endl;\n\t }\n\t }\n\t}\n\tbcf_destroy(recvcf);\n\thts_itr_destroy(itervcf);\n }\n }\n fai_destroy(fai);\n\n // Close bam\n bam_hdr_destroy(hdr);\n hts_idx_destroy(idx);\n sam_close(samfile);\n\n // Close output allele file\n dataOut.pop();\n\n // Close BCF\n bcf_hdr_destroy(bcfhdr);\n hts_idx_destroy(bcfidx);\n bcf_close(ibcffile);\n \n // End\n now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] Done.\" << std::endl;\n \n#ifdef PROFILE\n ProfilerStop();\n#endif\n \n \n return 0;\n }\n\n int ase(int argc, char **argv) {\n AseConfig c;\n\n // Parameter\n boost::program_options::options_description generic(\"Generic options\");\n generic.add_options()\n (\"help,?\", \"show help message\")\n (\"map-qual,m\", boost::program_options::value<unsigned short>(&c.minMapQual)->default_value(10), \"min. mapping quality\")\n (\"base-qual,b\", boost::program_options::value<unsigned short>(&c.minBaseQual)->default_value(10), \"min. base quality\")\n (\"reference,r\", boost::program_options::value<boost::filesystem::path>(&c.genome), \"reference fasta file\")\n (\"sample,s\", boost::program_options::value<std::string>(&c.sample)->default_value(\"NA12878\"), \"sample name\")\n (\"ase,a\", boost::program_options::value<boost::filesystem::path>(&c.as)->default_value(\"as.tsv.gz\"), \"allele-specific output file\")\n (\"vcffile,v\", boost::program_options::value<boost::filesystem::path>(&c.vcffile), \"input (phased) BCF file\")\n (\"phased,p\", \"BCF file is phased and BAM is haplo-tagged\")\n (\"full,f\", \"output all het. 
input SNPs\")\n ;\n \n boost::program_options::options_description hidden(\"Hidden options\");\n hidden.add_options()\n (\"input-file\", boost::program_options::value<boost::filesystem::path>(&c.bamfile), \"input bam file\")\n ;\n \n boost::program_options::positional_options_description pos_args;\n pos_args.add(\"input-file\", -1);\n \n boost::program_options::options_description cmdline_options;\n cmdline_options.add(generic).add(hidden);\n boost::program_options::options_description visible_options;\n visible_options.add(generic);\n boost::program_options::variables_map vm;\n boost::program_options::store(boost::program_options::command_line_parser(argc, argv).options(cmdline_options).positional(pos_args).run(), vm);\n boost::program_options::notify(vm);\n \n // Check command line arguments\n if ((vm.count(\"help\")) || (!vm.count(\"input-file\")) || (!vm.count(\"reference\")) || (!vm.count(\"vcffile\"))) {\n std::cout << \"Usage: alfred \" << argv[0] << \" [OPTIONS] -r <ref.fa> -s NA12878 -v <snps.bcf> -a <ase.tsv> <input.bam>\" << std::endl;\n std::cout << visible_options << \"\\n\";\n return 1;\n }\n\n // Phased running mode?\n if (!vm.count(\"phased\")) c.isPhased = false;\n else c.isPhased = true;\n\n // Output all input het. SNPs\n if (!vm.count(\"full\")) c.outputAll = false;\n else c.outputAll = true;\n \n // Check input BAM file\n if (vm.count(\"input-file\")) {\n if (!(boost::filesystem::exists(c.bamfile) && boost::filesystem::is_regular_file(c.bamfile) && boost::filesystem::file_size(c.bamfile))) {\n\tstd::cerr << \"Input BAM file is missing: \" << c.bamfile.string() << std::endl;\n\treturn 1;\n }\n samFile* samfile = sam_open(c.bamfile.string().c_str(), \"r\");\n if (samfile == NULL) {\n std::cerr << \"Fail to open file \" << c.bamfile.string() << std::endl;\n return 1;\n }\n hts_idx_t* idx = sam_index_load(samfile, c.bamfile.string().c_str());\n if (idx == NULL) {\n\tstd::cerr << \"Fail to open index for \" << c.bamfile.string() << std::endl;\n\treturn 1;\n }\n bam_hdr_t* hdr = sam_hdr_read(samfile);\n if (hdr == NULL) {\n\tstd::cerr << \"Fail to open header for \" << c.bamfile.string() << std::endl;\n\treturn 1;\n }\n bam_hdr_destroy(hdr);\n hts_idx_destroy(idx);\n sam_close(samfile);\n }\n \n // Check VCF/BCF file\n if (vm.count(\"vcffile\")) {\n if (!(boost::filesystem::exists(c.vcffile) && boost::filesystem::is_regular_file(c.vcffile) && boost::filesystem::file_size(c.vcffile))) {\n\tstd::cerr << \"Input SNP VCF/BCF file is missing: \" << c.vcffile.string() << std::endl;\n\treturn 1;\n }\n htsFile* ifile = bcf_open(c.vcffile.string().c_str(), \"r\");\n if (ifile == NULL) {\n\tstd::cerr << \"Fail to open file \" << c.vcffile.string() << std::endl;\n\treturn 1;\n }\n hts_idx_t* bcfidx = bcf_index_load(c.vcffile.string().c_str());\n if (bcfidx == NULL) {\n\tstd::cerr << \"Fail to open index file for \" << c.vcffile.string() << std::endl;\n\treturn 1;\n }\n bcf_hdr_t* hdr = bcf_hdr_read(ifile);\n if (hdr == NULL) {\n\tstd::cerr << \"Fail to open header for \" << c.vcffile.string() << std::endl;\n\treturn 1;\n }\n bcf_hdr_destroy(hdr);\n hts_idx_destroy(bcfidx);\n bcf_close(ifile);\n }\n \n // Show cmd\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \";\n for(int i=0; i<argc; ++i) { std::cout << argv[i] << ' '; }\n std::cout << std::endl;\n \n return aseRun(c);\n }\n\n\n}\n\n#endif\n" }, { "alpha_fraction": 0.6080951690673828, "alphanum_fraction": 0.6205843687057495, 
"avg_line_length": 37.9680290222168, "blob_id": "f890abc7062ef5b409ad026497195ae1d5b3bddd", "content_id": "98bd55ace24b7fd89e148cab4fc512501dfbb7bc", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 21939, "license_type": "permissive", "max_line_length": 216, "num_lines": 563, "path": "/src/count_rna.h", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#ifndef COUNT_RNA_H\n#define COUNT_RNA_H\n\n#include <limits>\n\n#include <boost/icl/split_interval_map.hpp>\n#include <boost/dynamic_bitset.hpp>\n#include <boost/unordered_map.hpp>\n#include <boost/date_time/posix_time/posix_time.hpp>\n#include <boost/date_time/gregorian/gregorian.hpp>\n#include <boost/progress.hpp>\n\n#include <htslib/sam.h>\n#include <htslib/faidx.h>\n\n#include \"version.h\"\n#include \"util.h\"\n#include \"gtf.h\"\n#include \"gff3.h\"\n#include \"bed.h\"\n\n\nnamespace bamstats\n{\n\n struct CountRNAConfig {\n bool ambiguous;\n uint8_t inputFileFormat; // 0 = gtf, 1 = bed, 2 = gff3\n uint8_t inputBamFormat; // 0 = bam, 1 = bed\n uint16_t stranded; // 0 = unstranded, 1 = stranded, 2 = stranded (opposite)\n uint16_t minQual;\n std::map<std::string, int32_t> nchr;\n std::string sampleName;\n std::string idname;\n std::string feature;\n std::string normalize;\n boost::filesystem::path gtfFile;\n boost::filesystem::path bedFile;\n boost::filesystem::path bamFile;\n boost::filesystem::path outfile;\n };\n\n template<typename TConfig, typename TGenomicRegions, typename TFeatureCounter>\n inline int32_t\n bed_counter(TConfig const& c, TGenomicRegions& gRegions, TFeatureCounter& fc) {\n typedef typename TGenomicRegions::value_type TChromosomeRegions;\n\n // Parse BED file\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \" << \"BED file parsing\" << std::endl;\n boost::progress_display show_progress(c.nchr.size());\n\n // Iterate chromosomese\n for(int32_t refIndex=0; refIndex < (int32_t) c.nchr.size(); ++refIndex) {\n ++show_progress;\n if (gRegions[refIndex].empty()) continue;\n\n // Sort by position\n std::sort(gRegions[refIndex].begin(), gRegions[refIndex].end(), SortIntervalStart<IntervalLabel>());\n\n // Flag feature positions\n typedef boost::dynamic_bitset<> TBitSet;\n TBitSet featureBitMap(250000000);\n for(uint32_t i = 0; i < gRegions[refIndex].size(); ++i)\n\tfor(int32_t k = gRegions[refIndex][i].start; k < gRegions[refIndex][i].end; ++k) featureBitMap[k] = 1;\n\n // Count hits\n std::ifstream chrFile(c.bamFile.string().c_str(), std::ifstream::in);\n if (chrFile.is_open()) {\n\twhile (chrFile.good()) {\n\t std::string chrFromFile;\n\t getline(chrFile, chrFromFile);\n\t typedef boost::tokenizer< boost::char_separator<char> > Tokenizer;\n\t boost::char_separator<char> sep(\" \\t,;\");\n\t Tokenizer tokens(chrFromFile, sep);\n\t Tokenizer::iterator tokIter = tokens.begin();\n\t if (tokIter!=tokens.end()) {\n\t std::string chrName = *tokIter++;\n\t if (c.nchr.find(chrName)->second != refIndex) continue;\n\t int32_t start = boost::lexical_cast<int32_t>(*tokIter++);\n\t int32_t end = boost::lexical_cast<int32_t>(*tokIter++);\n\t char strand = '*';\n\t if (tokIter != tokens.end()) {\n\t ++tokIter; // skip name\n\t if (tokIter != tokens.end()) {\n\t\t++tokIter; // skip score\n\t\tif (tokIter != tokens.end()) {\n\t\t strand = boost::lexical_cast<char>(*tokIter++);\n\t\t}\n\t }\n\t }\n\t if (start >= end) continue; // Bed has right-open 
intervals\n\t typedef std::vector<int32_t> TFeaturePos;\n\t TFeaturePos featurepos;\n\t for(int32_t i = start; i<end; ++i)\n\t if (featureBitMap[i]) featurepos.push_back(i);\n\n\t // Find feature\n\t bool ambiguous = false;\n\t int32_t featureid = -1; // No feature by default\n\t typedef std::set<int32_t> TFIdSet;\n\t TFIdSet fidset;\n\t if (!featurepos.empty()) {\n\t int32_t fpfirst = featurepos[0];\n\t int32_t fplast = featurepos[featurepos.size()-1];\n\t for(typename TChromosomeRegions::const_iterator vIt = gRegions[refIndex].begin(); vIt != gRegions[refIndex].end(); ++vIt) {\n\t\tif (vIt->end <= fpfirst) continue;\n\t\tif (vIt->start > fplast) break; // Sorted intervals so we can stop searching\n\t\tfor(TFeaturePos::const_iterator fIt = featurepos.begin(); fIt != featurepos.end(); ++fIt) {\n\t\t if ((vIt->start <= *fIt) && (vIt->end > *fIt) && (featureid != vIt->lid)) {\n\t\t if (c.stranded) {\n\t\t if (c.stranded == 1) {\n\t\t\tif (vIt->strand != strand) continue;\n\t\t } else {\n\t\t\tif (vIt->strand == strand) continue;\n\t\t }\n\t\t }\n\t\t if (c.ambiguous) fidset.insert(vIt->lid);\n\t\t else {\n\t\t if (featureid == -1) featureid = vIt->lid;\n\t\t else {\n\t\t\tambiguous = true;\n\t\t\tbreak;\n\t\t }\n\t\t }\n\t\t }\n\t\t}\n\t }\n\t }\n\t if ((!c.ambiguous) && (ambiguous)) continue; // Ambiguous read\n\n\t // Check feature agreement\n\t if (c.ambiguous) {\n\t if (fidset.empty()) continue; // No feature\n\t for(typename TFIdSet::const_iterator it = fidset.begin(); it != fidset.end(); ++it) ++fc[*it];\n\t } else {\n\t if (featureid == -1) continue; // No feature\n\t ++fc[featureid];\n\t }\n\t }\n\t}\n\tchrFile.close();\n }\n }\n return 0;\n }\n\n\n template<typename TConfig, typename TGenomicRegions, typename TFeatureCounter>\n inline int32_t\n bam_counter(TConfig const& c, TGenomicRegions& gRegions, TFeatureCounter& fc) {\n typedef typename TGenomicRegions::value_type TChromosomeRegions;\n \n // Load bam file\n samFile* samfile = sam_open(c.bamFile.string().c_str(), \"r\");\n hts_idx_t* idx = sam_index_load(samfile, c.bamFile.string().c_str());\n bam_hdr_t* hdr = sam_hdr_read(samfile);\n\n // Parse BAM file\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \" << \"BAM file parsing\" << std::endl;\n boost::progress_display show_progress( hdr->n_targets );\n\n // Pair qualities and features\n typedef boost::unordered_map<std::size_t, int32_t> TFeatures;\n TFeatures features;\n // Feature sets for ambiguous counting\n typedef std::set<int32_t> TFIdSet;\n typedef boost::unordered_map<std::size_t, TFIdSet> TFeatureSet;\n TFeatureSet fset;\n\n // Iterate chromosomes\n for(int32_t refIndex=0; refIndex < (int32_t) hdr->n_targets; ++refIndex) {\n ++show_progress;\n if (gRegions[refIndex].empty()) continue;\n\n // Sort by position\n std::sort(gRegions[refIndex].begin(), gRegions[refIndex].end(), SortIntervalStart<IntervalLabel>());\n int32_t maxFeatureLength = 0;\n for(uint32_t i = 0; i < gRegions[refIndex].size(); ++i) {\n\tif ((gRegions[refIndex][i].end - gRegions[refIndex][i].start) > maxFeatureLength) {\n\t maxFeatureLength = gRegions[refIndex][i].end - gRegions[refIndex][i].start;\n\t}\n }\n\n // Flag feature positions\n typedef boost::dynamic_bitset<> TBitSet;\n TBitSet featureBitMap(hdr->target_len[refIndex]);\n for(uint32_t i = 0; i < gRegions[refIndex].size(); ++i)\n\tfor(int32_t k = gRegions[refIndex][i].start; k < gRegions[refIndex][i].end; ++k) featureBitMap[k] = 1;\n\n // Count 
reads\n hts_itr_t* iter = sam_itr_queryi(idx, refIndex, 0, hdr->target_len[refIndex]);\n bam1_t* rec = bam_init1();\n int32_t lastAlignedPos = 0;\n std::set<std::size_t> lastAlignedPosReads;\n while (sam_itr_next(samfile, iter, rec) >= 0) {\n\tif (rec->core.flag & (BAM_FSECONDARY | BAM_FQCFAIL | BAM_FDUP | BAM_FSUPPLEMENTARY | BAM_FUNMAP)) continue;\n\tif ((rec->core.flag & BAM_FPAIRED) && ((rec->core.flag & BAM_FMUNMAP) || (rec->core.tid != rec->core.mtid))) continue; \n\tif (rec->core.qual < c.minQual) continue; // Low quality pair\n\n\tif (rec->core.flag & BAM_FPAIRED) {\n\t // Clean-up the read store for identical alignment positions\n\t if (rec->core.pos > lastAlignedPos) {\n\t lastAlignedPosReads.clear();\n\t lastAlignedPos = rec->core.pos;\n\t }\n\t}\n\n\t// Get read sequence\n\tstd::string sequence;\n\tsequence.resize(rec->core.l_qseq);\n\tuint8_t* seqptr = bam_get_seq(rec);\n\tfor (int32_t i = 0; i < rec->core.l_qseq; ++i) sequence[i] = \"=ACMGRSVTWYHKDBN\"[bam_seqi(seqptr, i)];\n\n\t// Parse CIGAR\n\tuint32_t* cigar = bam_get_cigar(rec);\n\tint32_t gp = rec->core.pos; // Genomic position\n\tint32_t sp = 0; // Sequence position\n\ttypedef std::vector<int32_t> TFeaturePos;\n\tTFeaturePos featurepos;\n\tfor (std::size_t i = 0; i < rec->core.n_cigar; ++i) {\n\t if (bam_cigar_op(cigar[i]) == BAM_CSOFT_CLIP) sp += bam_cigar_oplen(cigar[i]);\n\t else if (bam_cigar_op(cigar[i]) == BAM_CINS) sp += bam_cigar_oplen(cigar[i]);\n\t else if (bam_cigar_op(cigar[i]) == BAM_CDEL) gp += bam_cigar_oplen(cigar[i]);\n\t else if (bam_cigar_op(cigar[i]) == BAM_CREF_SKIP) gp += bam_cigar_oplen(cigar[i]);\n\t else if (bam_cigar_op(cigar[i]) == BAM_CHARD_CLIP) {\n\t //Nop\n\t } else if ((bam_cigar_op(cigar[i]) == BAM_CMATCH) || (bam_cigar_op(cigar[i]) == BAM_CEQUAL) || (bam_cigar_op(cigar[i]) == BAM_CDIFF)) {\n\t for(std::size_t k = 0; k<bam_cigar_oplen(cigar[i]); ++k, ++sp, ++gp)\n\t if (featureBitMap[gp]) featurepos.push_back(gp);\n\t } else {\n\t std::cerr << \"Unknown Cigar options\" << std::endl;\n\t return 1;\n\t }\n\t}\n\n\t// Find feature\n\tbool ambiguous = false;\n\tint32_t featureid = -1; // No feature by default\n\tTFIdSet fidset;\n\tif (!featurepos.empty()) {\n\t int32_t fpfirst = featurepos[0];\n\t int32_t fplast = featurepos[featurepos.size()-1];\n\t typename TChromosomeRegions::const_iterator vIt = std::lower_bound(gRegions[refIndex].begin(), gRegions[refIndex].end(), IntervalLabel(std::max(0, fpfirst - maxFeatureLength)), SortIntervalStart<IntervalLabel>());\n\t for(; vIt != gRegions[refIndex].end(); ++vIt) {\n\t if (vIt->end <= fpfirst) continue;\n\t if (vIt->start > fplast) break; // Sorted intervals so we can stop searching\n\t for(TFeaturePos::const_iterator fIt = featurepos.begin(); fIt != featurepos.end(); ++fIt) {\n\t if ((vIt->start <= *fIt) && (vIt->end > *fIt) && (featureid != vIt->lid)) {\n\t\tif (!_strandOkay(rec, vIt->strand, c.stranded)) continue;\n\t\tif (c.ambiguous) fidset.insert(vIt->lid);\n\t\telse {\n\t\t if (featureid == -1) featureid = vIt->lid;\n\t\t else {\n\t\t ambiguous = true;\n\t\t break;\n\t\t }\n\t\t}\n\t }\n\t }\n\t }\n\t}\n\tif ((!c.ambiguous) && (ambiguous)) continue; // Ambiguous read\n\n\tif (rec->core.flag & BAM_FPAIRED) {\n\t // First or Second Read?\t\n\t if ((rec->core.pos < rec->core.mpos) || ((rec->core.pos == rec->core.mpos) && (lastAlignedPosReads.find(hash_string(bam_get_qname(rec))) == lastAlignedPosReads.end()))) {\n\t // First read\n\t lastAlignedPosReads.insert(hash_string(bam_get_qname(rec)));\n\t std::size_t hv = hash_pair(rec);\n\t if 
(c.ambiguous) fset[hv] = fidset;\n\t else features[hv] = featureid;\n\t } else {\n\t // Second read\n\t std::size_t hv = hash_pair_mate(rec);\n\t if (c.ambiguous) {\n\t if (fset.find(hv) == fset.end()) continue; // Mate discarded\n\t fidset.insert(fset[hv].begin(), fset[hv].end());\n\t fset[hv] = TFIdSet();\n\t if (fidset.empty()) continue; // No feature\n\t for(typename TFIdSet::const_iterator it = fidset.begin(); it != fidset.end(); ++it) ++fc[*it];\n\t } else {\n\t if (features.find(hv) == features.end()) continue; // Mate discarded\n\t int32_t featuremate = features[hv];\n\t features[hv] = -1;\n\t \n\t // Check feature agreement\n\t if ((featureid == -1) && (featuremate == -1)) continue; // No feature\n\t else if ((featureid == -1) && (featuremate != -1)) featureid = featuremate;\n\t else if ((featureid != -1) && (featuremate == -1)) featuremate = featureid;\n\t else {\n\t\t// Both reads have a feature assignment\n\t\tif (featureid != featuremate) continue; // Feature disagreement\n\t }\n\n\t // Hurray, we finally have a valid pair\n\t ++fc[featureid];\n\t }\n\t }\n\t} else {\n\t // Single-end\n\t if (featureid != -1) ++fc[featureid];\n\t}\n }\n // Clean-up\n bam_destroy1(rec);\n hts_itr_destroy(iter);\n features.clear();\n }\n\t \n // clean-up\n bam_hdr_destroy(hdr);\n hts_idx_destroy(idx);\n sam_close(samfile);\n return 0;\n }\n\n \n template<typename TConfig>\n inline int32_t\n countRNARun(TConfig const& c) {\n\n#ifdef PROFILE\n ProfilerStart(\"alfred.prof\");\n#endif\n\n // Parse GTF file\n typedef std::vector<IntervalLabel> TChromosomeRegions;\n typedef std::vector<TChromosomeRegions> TGenomicRegions;\n TGenomicRegions gRegions;\n gRegions.resize(c.nchr.size(), TChromosomeRegions());\n typedef std::vector<std::string> TGeneIds;\n TGeneIds geneIds;\n typedef std::vector<bool> TProteinCoding;\n TProteinCoding pCoding;\n int32_t tf = 0;\n if (c.inputFileFormat == 0) tf = parseGTF(c, gRegions, geneIds, pCoding);\n else if (c.inputFileFormat == 1) tf = parseBED(c, gRegions, geneIds, pCoding);\n else if (c.inputFileFormat == 2) tf = parseGFF3(c, gRegions, geneIds, pCoding);\n if (tf == 0) {\n std::cerr << \"Error parsing GTF/GFF3/BED file!\" << std::endl;\n return 1;\n }\n\n // Get gene lengh\n typedef std::vector<uint32_t> TGeneLength;\n TGeneLength geneLength(geneIds.size(), 0);\n getGeneLength(gRegions, geneLength);\n\n // Feature counter\n typedef std::vector<int32_t> TFeatureCounter;\n TFeatureCounter fc(tf, 0);\n int32_t retparse = 1;\n if (c.inputBamFormat == 0) retparse = bam_counter(c, gRegions, fc);\n else if (c.inputBamFormat == 1) retparse = bed_counter(c, gRegions, fc);\n if (retparse != 0) {\n std::cerr << \"Error feature counting!\" << std::endl;\n return 1;\n }\n\n // Reads mapped to protein-coding sequences in the alignment\n uint64_t totalReadProtein = 0;\n TFeatureCounter pGenes;\n for(uint32_t idval = 0; idval < pCoding.size(); ++idval) {\n if (pCoding[idval]) {\n\ttotalReadProtein += fc[idval];\n\tpGenes.push_back(fc[idval]);\n }\n }\n std::sort(pGenes.begin(), pGenes.end());\n int32_t uqval = 0;\n if (!pGenes.empty()) uqval = pGenes[(int32_t) ((pGenes.size() * 3) / 4)];\n\n // Debug code\n //for(uint32_t idval = 0; idval < geneIds.size(); ++idval) std::cerr << geneIds[idval] << \"\\t\" << pCoding[idval] << \"\\t\" << geneLength[idval] << \"\\t\" << fc[idval] << std::endl;\n \n // Output count table\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \" << \"Output 
count table\" << std::endl;\n std::ofstream fcfile(c.outfile.string().c_str());\n\n if (c.normalize == \"fpkm\") {\n // FPKM\n fcfile << \"gene\\t\" << c.sampleName << std::endl;\n for(uint32_t idval = 0; idval < geneIds.size(); ++idval) {\n\tdouble fpkm = ((double) (fc[idval]) * (double) 1000000000) / ((double) (totalReadProtein) * (double) geneLength[idval]);\n\tfcfile << geneIds[idval] << \"\\t\" << fpkm << std::endl;\n }\n } else if (c.normalize == \"fpkm_uq\") {\n // FPKM-UQ\n fcfile << \"gene\\t\" << c.sampleName << std::endl;\n for(uint32_t idval = 0; idval < geneIds.size(); ++idval) {\n\tdouble fpkm_uq = ((double) (fc[idval]) * (double) 1000000000) / ((double) (uqval) * (double) geneLength[idval]);\n\tfcfile << geneIds[idval] << \"\\t\" << fpkm_uq << std::endl;\n }\n } else if (c.normalize == \"all\") {\n fcfile << \"gene\\t\" << c.sampleName + \"_raw\" << \"\\t\" << c.sampleName + \"_fpkm\" << \"\\t\" << c.sampleName + \"_fpkm_uq\" << std::endl;\n for(uint32_t idval = 0; idval < geneIds.size(); ++idval) {\n\tdouble fpkm = ((double) (fc[idval]) * (double) 1000000000) / ((double) (totalReadProtein) * (double) geneLength[idval]);\n\tdouble fpkm_uq = ((double) (fc[idval]) * (double) 1000000000) / ((double) (uqval) * (double) geneLength[idval]);\n\tfcfile << geneIds[idval] << \"\\t\" << fc[idval] << \"\\t\" << fpkm << \"\\t\" << fpkm_uq << std::endl;\n }\n } else {\n // Raw\n fcfile << \"gene\\t\" << c.sampleName << std::endl;\n for(uint32_t idval = 0; idval < geneIds.size(); ++idval) fcfile << geneIds[idval] << \"\\t\" << fc[idval] << std::endl;\n }\n fcfile.close();\n \n now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] Done.\" << std::endl;\n \n#ifdef PROFILE\n ProfilerStop();\n#endif\n\n return 0;\n }\n\n\n int count_rna(int argc, char **argv) {\n CountRNAConfig c;\n\n // Parameter\n boost::program_options::options_description generic(\"Generic options\");\n generic.add_options()\n (\"help,?\", \"show help message\")\n (\"map-qual,m\", boost::program_options::value<uint16_t>(&c.minQual)->default_value(10), \"min. 
mapping quality\")\n (\"stranded,s\", boost::program_options::value<uint16_t>(&c.stranded)->default_value(0), \"strand-specific counting (0: unstranded, 1: stranded, 2: reverse stranded)\")\n (\"normalize,n\", boost::program_options::value<std::string>(&c.normalize)->default_value(\"raw\"), \"normalization [raw|fpkm|fpkm_uq]\")\n (\"outfile,o\", boost::program_options::value<boost::filesystem::path>(&c.outfile)->default_value(\"gene.count\"), \"output file\")\n (\"ambiguous,a\", \"count ambiguous readsd\")\n ;\n\n boost::program_options::options_description gtfopt(\"GTF/GFF3 input file options\");\n gtfopt.add_options()\n (\"gtf,g\", boost::program_options::value<boost::filesystem::path>(&c.gtfFile), \"gtf/gff3 file\")\n (\"id,i\", boost::program_options::value<std::string>(&c.idname)->default_value(\"gene_id\"), \"gtf/gff3 attribute\")\n (\"feature,f\", boost::program_options::value<std::string>(&c.feature)->default_value(\"exon\"), \"gtf/gff3 feature\")\n ;\n \n boost::program_options::options_description bedopt(\"BED input file options, columns chr, start, end, name [, score, strand, gene_biotype]\");\n bedopt.add_options()\n (\"bed,b\", boost::program_options::value<boost::filesystem::path>(&c.bedFile), \"bed file\")\n ;\n\n boost::program_options::options_description hidden(\"Hidden options\");\n hidden.add_options()\n (\"input-file\", boost::program_options::value<boost::filesystem::path>(&c.bamFile), \"input bam file\")\n ;\n\n boost::program_options::positional_options_description pos_args;\n pos_args.add(\"input-file\", -1);\n\n boost::program_options::options_description cmdline_options;\n cmdline_options.add(generic).add(gtfopt).add(bedopt).add(hidden);\n boost::program_options::options_description visible_options;\n visible_options.add(generic).add(gtfopt).add(bedopt);\n\n // Parse command-line\n boost::program_options::variables_map vm;\n boost::program_options::store(boost::program_options::command_line_parser(argc, argv).options(cmdline_options).positional(pos_args).run(), vm);\n boost::program_options::notify(vm);\n\n // Check command line arguments\n if ((vm.count(\"help\")) || (!vm.count(\"input-file\")) || ((!vm.count(\"gtf\")) && (!vm.count(\"bed\")))) {\n std::cout << std::endl;\n std::cout << \"Usage: alfred \" << argv[0] << \" [OPTIONS] -g <hg19.gtf.gz> <aligned.bam>\" << std::endl;\n std::cout << \"Usage: alfred \" << argv[0] << \" [OPTIONS] -b <hg19.bed.gz> <aligned.bam>\" << std::endl;\n std::cout << visible_options << \"\\n\";\n return 1;\n }\n\n // Ambiguous read counting\n if (vm.count(\"ambiguous\")) c.ambiguous = true;\n else c.ambiguous = false;\n\n // Check bam file\n if (!(boost::filesystem::exists(c.bamFile) && boost::filesystem::is_regular_file(c.bamFile) && boost::filesystem::file_size(c.bamFile))) {\n std::cerr << \"Alignment file is missing: \" << c.bamFile.string() << std::endl;\n return 1;\n } else {\n if ((c.bamFile.string().length() > 3) && (c.bamFile.string().substr(c.bamFile.string().length() - 3) == \"bed\")) {\n\tc.inputBamFormat = 1;\n\tc.sampleName = c.bamFile.stem().string();\n\tstd::string oldChr = \"\";\n\ttypedef std::set<std::string> TChrSet;\n\tTChrSet chrSet;\n\tstd::ifstream chrFile(c.bamFile.string().c_str(), std::ifstream::in);\n\tif (chrFile.is_open()) {\n\t while (chrFile.good()) {\n\t std::string chrFromFile;\n\t getline(chrFile, chrFromFile);\n\t typedef boost::tokenizer< boost::char_separator<char> > Tokenizer;\n\t boost::char_separator<char> sep(\" \\t,;\");\n\t Tokenizer tokens(chrFromFile, sep);\n\t Tokenizer::iterator 
tokIter = tokens.begin();\n\t if (tokIter!=tokens.end()) {\n\t std::string chrName = *tokIter++;\n\t if (chrName != oldChr) chrSet.insert(chrName);\n\t }\n\t }\n\t chrFile.close();\n\t}\n\tint32_t refIndex = 0;\n\tfor(TChrSet::iterator itc = chrSet.begin(); itc != chrSet.end(); ++itc, ++refIndex) c.nchr.insert(std::make_pair(*itc, refIndex));\n } else {\n\tc.inputBamFormat = 0;\n\tsamFile* samfile = sam_open(c.bamFile.string().c_str(), \"r\");\n\tif (samfile == NULL) {\n\t std::cerr << \"Fail to open file \" << c.bamFile.string() << std::endl;\n\t return 1;\n\t}\n\thts_idx_t* idx = sam_index_load(samfile, c.bamFile.string().c_str());\n\tif (idx == NULL) {\n\t if (bam_index_build(c.bamFile.string().c_str(), 0) != 0) {\n\t std::cerr << \"Fail to open index for \" << c.bamFile.string() << std::endl;\n\t return 1;\n\t }\n\t}\n\tbam_hdr_t* hdr = sam_hdr_read(samfile);\n\tfor(int32_t refIndex=0; refIndex < hdr->n_targets; ++refIndex) c.nchr.insert(std::make_pair(hdr->target_name[refIndex], refIndex));\n\t\n\t// Get sample name\n\tstd::string sampleName;\n\tif (!getSMTag(std::string(hdr->text), c.bamFile.stem().string(), sampleName)) {\n\t std::cerr << \"Only one sample (@RG:SM) is allowed per input BAM file \" << c.bamFile.string() << std::endl;\n\t return 1;\n\t} else c.sampleName = sampleName;\n\tbam_hdr_destroy(hdr);\n\thts_idx_destroy(idx);\n\tsam_close(samfile);\n }\n }\n\n // Check region file\n if (!(boost::filesystem::exists(c.gtfFile) && boost::filesystem::is_regular_file(c.gtfFile) && boost::filesystem::file_size(c.gtfFile))) {\n if (!(boost::filesystem::exists(c.bedFile) && boost::filesystem::is_regular_file(c.bedFile) && boost::filesystem::file_size(c.bedFile))) {\n\tstd::cerr << \"Input gtf/bed file is missing.\" << std::endl;\n\treturn 1;\n } else c.inputFileFormat = 1;\n } else {\n if (is_gff3(c.gtfFile)) c.inputFileFormat = 2;\n else c.inputFileFormat = 0;\n }\n\n // Show cmd\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \";\n std::cout << \"alfred \";\n for(int i=0; i<argc; ++i) { std::cout << argv[i] << ' '; }\n std::cout << std::endl;\n\n return countRNARun(c);\n }\n \n\n\n \n}\n\n#endif\n" }, { "alpha_fraction": 0.5715091824531555, "alphanum_fraction": 0.5952138900756836, "avg_line_length": 35.0121955871582, "blob_id": "8e6ae5f2cce599312424a249acde3abe96830536", "content_id": "a4d948ce2ff047e4f1dad2eb185b671f4c1cb966", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 8859, "license_type": "permissive", "max_line_length": 130, "num_lines": 246, "path": "/src/repliseqproc.h", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#ifndef REPLISEQPROC_H\n#define REPLISEQPROC_H\n\n#include <limits>\n\n#include <boost/dynamic_bitset.hpp>\n#include <boost/unordered_map.hpp>\n#include <boost/date_time/posix_time/posix_time.hpp>\n#include <boost/date_time/gregorian/gregorian.hpp>\n#include <boost/progress.hpp>\n\n#include <htslib/sam.h>\n#include <htslib/faidx.h>\n\n#include \"util.h\"\n\nnamespace bamstats\n{\n\n template<typename TConfig>\n inline int32_t\n repliseqRun(TConfig const& c) {\n // Open file handles\n typedef std::vector<samFile*> TSamFile;\n typedef std::vector<hts_idx_t*> TIndex;\n TSamFile samfile(c.files.size());\n TIndex idx(c.files.size());\n for(uint32_t file_c = 0; file_c < c.files.size(); ++file_c) {\n samfile[file_c] = sam_open(c.files[file_c].string().c_str(), \"r\");\n 
idx[file_c] = sam_index_load(samfile[file_c], c.files[file_c].string().c_str());\n }\n bam_hdr_t* hdr = sam_hdr_read(samfile[0]);\n\n // Genomic counts\n typedef std::vector<int32_t> TBinCount;\n typedef std::vector<TBinCount> TGenomicCount;\n typedef std::vector<TGenomicCount> TFileCounts;\n TFileCounts fc(c.files.size(), TGenomicCount());\n for(uint32_t file_c = 0; file_c < c.files.size(); ++file_c) fc[file_c].resize(hdr->n_targets, TBinCount());\n std::vector<int32_t> totalByFile(c.files.size(), 0);\n \n // Parse reference and BAM file\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \" << \"BAM file parsing\" << std::endl;\n boost::progress_display show_progress( hdr->n_targets );\n\n // Parse genome\n faidx_t* fai = fai_load(c.genome.string().c_str());\n for(int32_t refIndex = 0; refIndex < hdr->n_targets; ++refIndex) {\n ++show_progress;\n\n // Fetch sequence\n char* seq = NULL;\n int32_t seqlen = -1;\n std::string tname(hdr->target_name[refIndex]);\n seq = faidx_fetch_seq(fai, tname.c_str(), 0, hdr->target_len[refIndex], &seqlen);\n\n // Fetch Ns\n typedef boost::dynamic_bitset<> TBitSet;\n TBitSet nrun(hdr->target_len[refIndex]);\n for(uint32_t i = 0; i < hdr->target_len[refIndex]; ++i)\n\tif ((seq[i] == 'n') || (seq[i] == 'N')) nrun[i] = 1;\n \n \n for(unsigned int file_c = 0; file_c < c.files.size(); ++file_c) {\n\t// Set up fragment counter\n\ttypedef uint8_t TCountType;\n\tint32_t maxCount = std::numeric_limits<TCountType>::max();\n\ttypedef std::vector<TCountType> TChrCounts;\n\tTChrCounts cc(hdr->target_len[refIndex], 0);\n\n\t// Iterate bam\n\thts_itr_t* iter = sam_itr_queryi(idx[file_c], refIndex, 0, hdr->target_len[refIndex]);\n\tbam1_t* rec = bam_init1();\n\twhile (sam_itr_next(samfile[file_c], iter, rec) >= 0) {\n\t if (rec->core.flag & (BAM_FSECONDARY | BAM_FQCFAIL | BAM_FDUP | BAM_FSUPPLEMENTARY | BAM_FUNMAP)) continue;\n\t if (rec->core.qual < c.minq) continue;\n\n\t int32_t midPoint = 0;\n\t if (rec->core.flag & BAM_FPAIRED) {\n\t if ((rec->core.flag & BAM_FMUNMAP) || (rec->core.pos < rec->core.mpos) || (rec->core.tid != rec->core.mtid)) continue;\n\t int32_t outerISize = rec->core.pos - rec->core.mpos + rec->core.l_qseq;\n\t if (outerISize < 1000) midPoint = rec->core.pos + outerISize / 2;\n\t else midPoint = rec->core.pos + halfAlignmentLength(rec);\n\t } else {\n\t midPoint = rec->core.pos + halfAlignmentLength(rec);\n\t }\n\t if ((midPoint >= 0) && (midPoint < (int32_t) hdr->target_len[refIndex])) {\n\t ++totalByFile[file_c];\n\t if (cc[midPoint] < maxCount) ++cc[midPoint];\n\t }\n\t}\n\tbam_destroy1(rec);\n\thts_itr_destroy(iter);\n\n\t// Summarize counts\n\tfor(int32_t i = 0; (i + c.wsize) < (int32_t) hdr->target_len[refIndex]; i = i + c.step) {\n\t int32_t sumf = 0;\n\t int32_t nsum = 0;\n\t for(int32_t k = i; k < i + c.wsize; ++k) {\n\t sumf += cc[k];\n\t nsum += nrun[k];\n\t }\n\t // Blacklist windows with Ns\n\t if (!nsum) fc[file_c][refIndex].push_back(sumf);\n\t else fc[file_c][refIndex].push_back(0);\n\t}\n }\n if (seq != NULL) free(seq);\n }\n fai_destroy(fai);\n\n // Median normalize counts\n std::sort(totalByFile.begin(), totalByFile.end());\n int32_t med = totalByFile[totalByFile.size() / 2];\n for(uint32_t file_c = 0; file_c < c.files.size(); ++file_c) {\n double corf = (double) med / (double) totalByFile[file_c];\n for(int32_t refIndex = 0; refIndex < hdr->n_targets; ++refIndex) {\n\tfor(uint32_t k = 0; k < fc[file_c][refIndex].size(); ++k) {\n\t 
fc[file_c][refIndex][k] = (int32_t) (fc[file_c][refIndex][k] * corf);\n\t}\n }\n }\n\n // Percent normalized values\n typedef std::vector<double> TWindows;\n typedef std::vector<TWindows> TGenomicWindows;\n TGenomicWindows gw(hdr->n_targets, TWindows());\n TWindows all;\n for(int32_t refIndex = 0; refIndex < hdr->n_targets; ++refIndex) gw[refIndex].resize(fc[0][refIndex].size(), 0);\n for(int32_t refIndex = 0; refIndex < hdr->n_targets; ++refIndex) {\n for(uint32_t k = 0; k < fc[0][refIndex].size(); ++k) {\n\tfor(uint32_t file_c = 0; file_c < c.files.size(); ++file_c) gw[refIndex][k] += fc[file_c][refIndex][k];\n\tall.push_back(gw[refIndex][k]);\n }\n }\n std::sort(all.begin(), all.end());\n double medrep = all[all.size() / 2];\n all.clear();\n for(int32_t refIndex = 0; refIndex < hdr->n_targets; ++refIndex) {\n for(uint32_t k = 0; k < fc[0][refIndex].size(); ++k) {\n\tdouble corf = 1.0;\n\tif (gw[refIndex][k] != 0) corf = medrep / (double) gw[refIndex][k];\n\tfor(uint32_t file_c = 0; file_c < c.files.size(); ++file_c) fc[file_c][refIndex][k] = (int32_t) (fc[file_c][refIndex][k] * corf);\n }\n }\n\n // Replication track\n std::vector<double> magicformula;\n magicformula.push_back(0.917);\n magicformula.push_back(0.75);\n magicformula.push_back(0.583);\n magicformula.push_back(0.417);\n magicformula.push_back(0.25);\n for(int32_t refIndex = 0; refIndex < hdr->n_targets; ++refIndex) {\n for(uint32_t k = 0; k < gw[refIndex].size(); ++k) {\n\tgw[refIndex][k] = 0;\n\tfor(uint32_t file_c = 0; file_c < c.files.size(); ++file_c) {\n\t if (file_c < magicformula.size()) {\n\t gw[refIndex][k] += magicformula[file_c] * fc[file_c][refIndex][k];\n\t }\n\t}\n }\n }\n\n // Moving avg. smoothing\n int32_t smoothw = 75;\n for(int32_t refIndex = 0; refIndex < hdr->n_targets; ++refIndex) {\n typename TGenomicWindows::value_type tmpgw(gw[refIndex].size(), 0);\n for(int32_t k = 0; k < (int32_t) gw[refIndex].size(); ++k) {\n\tdouble mavg = 0;\n\tint32_t cavg = 0;\n\tint32_t ks = std::max(0, k-smoothw);\n\tint32_t ke = std::min((int32_t) gw[refIndex].size(), k+smoothw);\n\tfor(int32_t ki = ks; ki<ke; ++ki) {\n\t if (gw[refIndex][ki] != 0) {\n\t mavg += gw[refIndex][ki];\n\t ++cavg;\n\t }\n\t}\n\ttmpgw[k] = mavg / (double) cavg;\n }\n double maxVal = 0;\n double minVal = 40000000;\n for(int32_t k = 0; k < (int32_t) gw[refIndex].size(); ++k) {\n\tif (gw[refIndex][k] != 0) {\n\t gw[refIndex][k] = tmpgw[k];\n\t if (gw[refIndex][k] < minVal) minVal = gw[refIndex][k];\n\t if (gw[refIndex][k] > maxVal) maxVal = gw[refIndex][k];\n\t} else gw[refIndex][k] = -1;\n }\n // Normalize to [0,1]\n for(int32_t k = 0; k < (int32_t) gw[refIndex].size(); ++k) {\n\tif (gw[refIndex][k] != -1) gw[refIndex][k] = (gw[refIndex][k] - minVal) / (maxVal - minVal);\n }\n }\n\n // Output profile\n std::string statFileName = c.outprefix + \".profile.tsv\";\n std::ofstream pfile(statFileName.c_str());\n pfile << \"chr\\tpos\";\n for(uint32_t file_c = 0; file_c < c.files.size(); ++file_c) pfile << \"\\t\" << c.files[file_c].stem().string();\n pfile << std::endl;\n for(int32_t refIndex = 0; refIndex < hdr->n_targets; ++refIndex) {\n if (!fc[0][refIndex].empty()) {\n\tfor(uint32_t k = 0; k < fc[0][refIndex].size(); ++k) {\n\t pfile << hdr->target_name[refIndex] << '\\t' << k * c.step + c.wsize / 2;\n\t for(uint32_t file_c = 0; file_c < c.files.size(); ++file_c) pfile << '\\t' << fc[file_c][refIndex][k];\n\t pfile << std::endl;\n\t}\n }\n }\n pfile.close();\n\n // Output replication timing (higher values correspond to earlier replication)\n 
statFileName = c.outprefix + \".reptime.tsv\";\n std::ofstream rfile(statFileName.c_str());\n rfile << \"chr\\tpos\\treptime\" << std::endl;\n for(int32_t refIndex = 0; refIndex < hdr->n_targets; ++refIndex) {\n for(uint32_t k = 0; k < gw[refIndex].size(); ++k) {\n\trfile << hdr->target_name[refIndex] << '\\t' << k * c.step + c.wsize / 2 << '\\t' << gw[refIndex][k] << std::endl;\n }\n }\n rfile.close();\n \n // clean-up\n bam_hdr_destroy(hdr);\n for(unsigned int file_c = 0; file_c < c.files.size(); ++file_c) {\n hts_idx_destroy(idx[file_c]);\n sam_close(samfile[file_c]);\n }\n \n now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] Done.\" << std::endl;\n \n#ifdef PROFILE\n ProfilerStop();\n#endif\n\n\n return 0;\n }\n\n}\n\n#endif\n" }, { "alpha_fraction": 0.7407740950584412, "alphanum_fraction": 0.7637263536453247, "avg_line_length": 66.33333587646484, "blob_id": "bf5b6244b0b381dafc57ae7aad10d49681da3df9", "content_id": "7e4ef194e122b090901cceb1841aaf2bb1ce55a6", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2222, "license_type": "permissive", "max_line_length": 510, "num_lines": 33, "path": "/README.md", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "<p align=\"center\">\n <img width=\"450\" src=\"https://raw.githubusercontent.com/tobiasrausch/alfred/master/alfred.png\">\n <h1></h1>\n</p>\n\n[![Anaconda-Server Badge](https://anaconda.org/bioconda/alfred/badges/installer/conda.svg)](https://anaconda.org/bioconda/alfred)\n[![Anaconda-Server Badge](https://anaconda.org/bioconda/alfred/badges/downloads.svg)](https://anaconda.org/bioconda/alfred)\n[![C/C++ CI](https://github.com/tobiasrausch/alfred/workflows/C/C++%20CI/badge.svg)](https://github.com/tobiasrausch/alfred/actions)\n[![Docker CI](https://github.com/tobiasrausch/alfred/workflows/Docker%20CI/badge.svg)](https://hub.docker.com/r/trausch/alfred/)\n[![GitHub license](https://img.shields.io/badge/License-BSD%203--Clause-blue.svg)](https://github.com/tobiasrausch/alfred/blob/master/LICENSE)\n[![GitHub Releases](https://img.shields.io/github/release/tobiasrausch/alfred.svg)](https://github.com/tobiasrausch/alfred/releases)\n\n## Alfred: BAM alignment statistics, feature counting and feature annotation\n\nAlfred is available as a [Bioconda package](https://anaconda.org/bioconda/alfred), as a pre-compiled statically linked binary from [Alfred's github release page](https://github.com/tobiasrausch/alfred/releases/), as a singularity container [SIF file](https://github.com/tobiasrausch/alfred/releases/) or as a minimal [Docker container](https://hub.docker.com/r/trausch/alfred/). Please have a look at [Alfred's documentation](https://www.gear-genomics.com/docs/alfred/) for any installation or usage questions.\n\n[Source Code](https://github.com/tobiasrausch/alfred/)\n\n[Web Application](https://www.gear-genomics.com/alfred/)\n\n[Documentation](https://www.gear-genomics.com/docs/alfred/)\n\n## Citation\n\nTobias Rausch, Markus Hsi-Yang Fritz, Jan O Korbel, Vladimir Benes. \nAlfred: interactive multi-sample BAM alignment statistics, feature counting and feature annotation for long- and short-read sequencing.\nBioinformatics. 2019 Jul 15;35(14):2489-2491.\n[https://doi.org/10.1093/bioinformatics/bty1007](https://doi.org/10.1093/bioinformatics/bty1007)\n\n\nLicense\n-------\nAlfred is distributed under the BSD 3-Clause license. 
Consult the accompanying [LICENSE](https://github.com/tobiasrausch/alfred/blob/master/LICENSE) file for more details.\n" }, { "alpha_fraction": 0.7576706409454346, "alphanum_fraction": 0.7576706409454346, "avg_line_length": 34.488887786865234, "blob_id": "f84f7226e50d1876c282f68526fff8d239648c48", "content_id": "6d04b7509e0afb2e69d47bf637e584967a7adad5", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1597, "license_type": "permissive", "max_line_length": 241, "num_lines": 45, "path": "/docs/faq/README.md", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "# FAQ\n\n::: tip\nFor questions, help or feature requests please contact [email protected]\n:::\n\n[[toc]]\n\n## Does Alfred support the CRAM format?\n\nYes, Alfred uses [HTSlib](https://github.com/samtools/htslib) to read/write BAM/CRAM files.\n\n## Is there an example data set to test my Alfred installation?\n\nThe github source code includes a minimal example to check that alfred compiled properly from source and that the web front end is working.\n\n```bash\nalfred qc -r example/E.coli.fa.gz -o example/stats.tsv.gz example/E.coli.cram\nRscript scripts/stats.R example/stats.tsv.gz\n```\n\nFor the web front end.\n\n```bash\nalfred qc -r example/E.coli.fa.gz -f json -o ecoli.json.gz example/E.coli.cram\n```\n\nPlease upload `ecoli.json.gz` to the [Alfred web application](https://www.gear-genomics.com/alfred).\n\n## Is the feature counting paired-end aware?\n\nYes, Alfred counts fragments (read pairs) and not individual reads.\n\n## Why are hard clipping statistics always zero?\n\nMany aligners trim primary alignments using soft-clips and only secondary and supplementary alignments use hard clips. For long reads you may want to evaluate secondary and supplementary alignments using the `-s` and `-u` command-line flags.\n\n```bash\nalfred qc -su -r <genome.fa> <input.bam>\n```\n\n## Calculation of InDel rates and sequencing error rate?\n\nThe sequencing error rates are calculated over all aligned bases. The total number of deletions, character D in the Cigar string, is divided\nby the total number of aligned bases (Cigar M). 
The same approach is used for insertion (Cigar I) and mismatches (Cigar M and mismatch to reference).\n" }, { "alpha_fraction": 0.5995743870735168, "alphanum_fraction": 0.6191756129264832, "avg_line_length": 40.33333206176758, "blob_id": "ebe2b692fabf8a9e3d31978e690d68b84764b34d", "content_id": "d9798db9f1443d13e75fc28bdb94d4c977362d0a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 8928, "license_type": "permissive", "max_line_length": 530, "num_lines": 216, "path": "/src/spaced.h", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#ifndef SPACED_H\n#define SPACED_H\n\n#include <limits>\n\n#include <boost/icl/split_interval_map.hpp>\n#include <boost/dynamic_bitset.hpp>\n#include <boost/unordered_map.hpp>\n#include <boost/date_time/posix_time/posix_time.hpp>\n#include <boost/date_time/gregorian/gregorian.hpp>\n#include <boost/progress.hpp>\n\n#include <htslib/sam.h>\n#include <htslib/faidx.h>\n\n#include \"version.h\"\n#include \"util.h\"\n#include \"gtf.h\"\n#include \"gff3.h\"\n#include \"bed.h\"\n#include \"motif.h\"\n\nnamespace bamstats\n{\n\n struct SpacedConfig {\n int32_t low;\n int32_t high;\n std::string motif1;\n std::string motif2;\n boost::filesystem::path infile;\n boost::filesystem::path outfile;\n };\n\n template<typename TConfig>\n inline int32_t\n spacedMotifRun(TConfig const& c) {\n int32_t motiflen1 = -1;\n int32_t motiflen2 =\t-1;\n\n // Motif hits\n typedef std::pair<uint32_t, float> TPosScore;\n typedef std::vector<TPosScore> TMotifHit;\n typedef std::vector<TMotifHit> TGenomicMotifHit;\n typedef std::vector<TGenomicMotifHit> TStrandGenomicHit;\n TStrandGenomicHit mo1(2, TGenomicMotifHit());\n TStrandGenomicHit mo2(2, TGenomicMotifHit());\n\n // Chromosome map\n typedef std::map<std::string, uint32_t> TChrMap;\n TChrMap chrMap;\n uint32_t numChr = 0;\n std::vector<std::string> revMap;\n \n // Parse motif hits\n std::ifstream file(c.infile.string().c_str(), std::ios_base::in | std::ios_base::binary);\n boost::iostreams::filtering_streambuf<boost::iostreams::input> dataIn;\n dataIn.push(boost::iostreams::gzip_decompressor());\n dataIn.push(file);\n std::istream instream(&dataIn);\n std::string gline;\n while(std::getline(instream, gline)) {\n typedef boost::tokenizer< boost::char_separator<char> > Tokenizer;\n boost::char_separator<char> sep(\" \\t\");\n Tokenizer tokens(gline, sep);\n Tokenizer::iterator tokIter = tokens.begin();\n if (tokIter != tokens.end()) {\n\tstd::string chrName = *tokIter++;\n\tstd::string pos = *tokIter;\n\tif (pos == \"start\") continue; // Header\n\tint32_t start = boost::lexical_cast<int32_t>(*tokIter++);\n\tint32_t end = boost::lexical_cast<int32_t>(*tokIter++);\n\tstd::string id = *tokIter++;\n\tif ((id == c.motif1) || (id == c.motif2)) {\n\t std::string strandStr = *tokIter++;\n\t char strand = strandStr[0];\n\t float score = boost::lexical_cast<float>(*tokIter);\n\t uint32_t refIndex = numChr;\n\t TChrMap::const_iterator it = chrMap.find(chrName);\n\t if (it == chrMap.end()) {\n\t chrMap.insert(std::make_pair(chrName, numChr));\n\t revMap.push_back(chrName);\n\t ++numChr;\n\t mo1[0].resize(numChr, TMotifHit());\n\t mo1[1].resize(numChr, TMotifHit());\n\t mo2[0].resize(numChr, TMotifHit());\n\t mo2[1].resize(numChr, TMotifHit());\n\t } else refIndex = it->second;\n\t if (id == c.motif1) {\n\t int32_t len1 = (end - start) + 1;\n\t if (motiflen1 == -1) motiflen1 = len1;\n\t else if (motiflen1 != len1) {\n\t std::cerr << \"Warning: Motif hits have 
different lengths!\" << std::endl;\n\t }\n\t if (strand == '+') mo1[0][refIndex].push_back(std::make_pair(start, score));\n\t else mo1[1][refIndex].push_back(std::make_pair(start, score));\n\t } else {\n\t int32_t len2 = (end - start) + 1;\n\t if (motiflen2 == -1) motiflen2 = len2;\n\t else if (motiflen2 != len2) {\n\t std::cerr\t<< \"Warning: Motif hits have different lengths!\" << std::endl;\n\t }\n\t if (strand == '+') mo2[0][refIndex].push_back(std::make_pair(start, score));\n\t else mo2[1][refIndex].push_back(std::make_pair(start, score));\n\t }\n\t}\n }\n }\n dataIn.pop();\n\n // Output file\n boost::iostreams::filtering_ostream dataOut;\n dataOut.push(boost::iostreams::gzip_compressor());\n dataOut.push(boost::iostreams::file_sink(c.outfile.string().c_str(), std::ios_base::out | std::ios_base::binary));\n dataOut << \"chr\\tstart\\tend\\tjoined_motifs\" << std::endl;\n \n // Motifs\n for(uint32_t strand = 0; strand < 2; ++strand) {\n char strandlabel = '+';\n if (strand) strandlabel = '-';\n for(uint32_t refIndex = 0; refIndex < revMap.size(); ++refIndex) {\n\tstd::sort(mo1[strand][refIndex].begin(), mo1[strand][refIndex].end());\n\tstd::sort(mo2[strand][refIndex].begin(), mo2[strand][refIndex].end());\n\tfor(uint32_t i = 0; i < mo1[strand][refIndex].size(); ++i) {\n\t for(uint32_t j = 0; j < mo2[strand][refIndex].size(); ++j) {\n\t std::vector<int32_t> allpos;\n\t allpos.push_back(mo1[strand][refIndex][i].first);\n\t allpos.push_back(mo1[strand][refIndex][i].first + motiflen1 - 1);\n\t allpos.push_back(mo2[strand][refIndex][j].first);\n\t allpos.push_back(mo2[strand][refIndex][j].first + motiflen2 - 1);\n\t std::sort(allpos.begin(), allpos.end());\n\t int32_t spacing = 0; // Overlapping motifs\n\t if (mo1[strand][refIndex][i].first + motiflen1 - 1 < mo2[strand][refIndex][j].first) spacing = mo2[strand][refIndex][j].first - (mo1[strand][refIndex][i].first + motiflen1);\n\t if (mo2[strand][refIndex][j].first + motiflen2 - 1 < mo1[strand][refIndex][i].first) spacing = mo1[strand][refIndex][i].first - (mo2[strand][refIndex][j].first + motiflen2);\n\t // Correct spacing?\n\t if ((c.low <= spacing) && (spacing <= c.high)) {\n\t // Correct order?\n\t if (strand) {\n\t\tif (mo2[strand][refIndex][j].first < mo1[strand][refIndex][i].first) {\n\t\t dataOut << revMap[refIndex] << \"\\t\" << allpos[0] << \"\\t\" << allpos[3] << \"\\t\" << revMap[refIndex] << ',' << mo1[strand][refIndex][i].first << ',' << mo1[strand][refIndex][i].first + motiflen1 - 1 << ',' << c.motif1 << ',' << strandlabel << ',' << mo1[strand][refIndex][i].second << ':' << revMap[refIndex] << ',' << mo2[strand][refIndex][j].first << ',' << mo2[strand][refIndex][j].first + motiflen2 - 1 << ',' << c.motif2 << ',' << strandlabel << ',' << mo2[strand][refIndex][j].second << \":Spacing=\" << spacing << std::endl;\n\t\t}\n\t } else {\n\t\tif (mo1[strand][refIndex][i].first < mo2[strand][refIndex][j].first) {\n\t\t dataOut << revMap[refIndex] << \"\\t\" << allpos[0] << \"\\t\" << allpos[3] << \"\\t\" << revMap[refIndex] << ',' << mo1[strand][refIndex][i].first << ',' << mo1[strand][refIndex][i].first + motiflen1 - 1 << ',' << c.motif1 << ',' << strandlabel << ',' << mo1[strand][refIndex][i].second << ':' << revMap[refIndex] << ',' << mo2[strand][refIndex][j].first << ',' << mo2[strand][refIndex][j].first + motiflen2 - 1 << ',' << c.motif2 << ',' << strandlabel << ',' << mo2[strand][refIndex][j].second << \":Spacing=\" << spacing << std::endl;\n\t\t}\n\t }\n\t }\n\t }\n\t}\n }\n }\n dataOut.pop();\n\n // Done\n 
boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] Done.\" << std::endl;\n return 0;\n }\n\n\n int spaced(int argc, char **argv) {\n SpacedConfig c;\n\n // Parameter\n boost::program_options::options_description generic(\"Generic options\");\n generic.add_options()\n (\"help,?\", \"show help message\")\n (\"motif1,m\", boost::program_options::value<std::string>(&c.motif1)->default_value(\"Heptamer\"), \"motif1 name\")\n (\"motif2,n\", boost::program_options::value<std::string>(&c.motif2)->default_value(\"Nonamer\"), \"motif2 name\")\n (\"spacer-low,l\", boost::program_options::value<int32_t>(&c.low)->default_value(11), \"min. spacer length\")\n (\"spacer-high,h\", boost::program_options::value<int32_t>(&c.high)->default_value(13), \"max. spacer length\")\n (\"outfile,o\", boost::program_options::value<boost::filesystem::path>(&c.outfile)->default_value(\"joined.bed.gz\"), \"joined motif hits\")\n ;\n\n boost::program_options::options_description hidden(\"Hidden options\");\n hidden.add_options()\n (\"input-file\", boost::program_options::value<boost::filesystem::path>(&c.infile), \"input file\")\n ;\n\n boost::program_options::positional_options_description pos_args;\n pos_args.add(\"input-file\", -1);\n\n boost::program_options::options_description cmdline_options;\n cmdline_options.add(generic).add(hidden);\n boost::program_options::options_description visible_options;\n visible_options.add(generic);\n\n // Parse command-line\n boost::program_options::variables_map vm;\n boost::program_options::store(boost::program_options::command_line_parser(argc, argv).options(cmdline_options).positional(pos_args).run(), vm);\n boost::program_options::notify(vm);\n\n // Check command line arguments\n if ((vm.count(\"help\")) || (!vm.count(\"input-file\"))) {\n std::cout << std::endl;\n std::cout << \"Usage: alfred \" << argv[0] << \" [OPTIONS] <motif.hits.gz>\" << std::endl;\n std::cout << visible_options << \"\\n\";\n return 1;\n }\n\n // Show cmd\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \";\n std::cout << \"alfred \";\n for(int i=0; i<argc; ++i) { std::cout << argv[i] << ' '; }\n std::cout << std::endl;\n\n return spacedMotifRun(c);\n }\n \n\n\n \n}\n\n#endif\n" }, { "alpha_fraction": 0.6326132416725159, "alphanum_fraction": 0.6741673946380615, "avg_line_length": 28.626667022705078, "blob_id": "358a97090eb197ea846ce4d94aeee7ea41a58ba4", "content_id": "3db3de31973e1e3203829fc92da826fa400fa5b9", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6666, "license_type": "permissive", "max_line_length": 279, "num_lines": 225, "path": "/src/qcstruct.h", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#ifndef QCSTRUCT_H\n#define QCSTRUCT_H\n\n#include <limits>\n\n#include <boost/dynamic_bitset.hpp>\n#include <boost/unordered_map.hpp>\n#include <boost/date_time/posix_time/posix_time.hpp>\n#include <boost/date_time/gregorian/gregorian.hpp>\n#include <boost/progress.hpp>\n#include <boost/iostreams/device/file.hpp>\n#include <boost/iostreams/filtering_stream.hpp>\n#include <boost/iostreams/filter/zlib.hpp>\n#include <boost/iostreams/filter/gzip.hpp>\n\n#include <htslib/sam.h>\n#include <htslib/faidx.h>\n\n#include \"util.h\"\n\nnamespace bamstats\n{\n\n struct ChrGC {\n uint32_t ncount;\n uint32_t gccount;\n };\n\n 
struct ReferenceFeatures {\n typedef std::vector<uint64_t> TGCContent;\n typedef std::vector<Interval> TChromosomeRegions;\n typedef std::vector<TChromosomeRegions> TGenomicRegions;\n \n uint64_t referencebp;\n uint64_t ncount;\n uint32_t totalBedSize;\n uint32_t nchr;\n std::vector<ChrGC> chrGC;\n TGenomicRegions gRegions;\n TGCContent refGcContent;\n \n\n explicit ReferenceFeatures(uint32_t const nc) : referencebp(0), ncount(0), totalBedSize(0), nchr(nc) {\n chrGC.resize(nc, ChrGC());\n gRegions.resize(nc, TChromosomeRegions());\n refGcContent.resize(102, 0);\n }\n };\n\n \n struct BaseCounts {\n typedef uint32_t TCountType;\n typedef std::vector<TCountType> TCoverageBp;\n \n typedef uint16_t TMaxCoverage;\n typedef std::vector<TMaxCoverage> TBpCoverage;\n\n uint32_t maxCoverage;\n uint32_t maxIndelSize;\n uint64_t n1;\n uint64_t n2;\n uint64_t nd;\n uint64_t matchCount;\n uint64_t mismatchCount;\n uint64_t delCount;\n uint64_t insCount;\n uint64_t softClipCount;\n uint64_t hardClipCount;\n std::vector<uint32_t> delHomACGTN; // A:0, C:1, G:2, T:3, N:4, none:5\n std::vector<uint32_t> insHomACGTN; // A:0, C:1, G:2, T:3, N:4, none:5\n std::vector<uint32_t> delSize;\n std::vector<uint32_t> insSize;\n TCoverageBp bpWithCoverage;\n TBpCoverage cov;\n\n BaseCounts() : maxCoverage(std::numeric_limits<TMaxCoverage>::max()), maxIndelSize(50), n1(0), n2(0), nd(0), matchCount(0), mismatchCount(0), delCount(0), insCount(0), softClipCount(0), hardClipCount(0) {\n delHomACGTN.resize(6, 0);\n insHomACGTN.resize(6, 0);\n bpWithCoverage.resize(maxCoverage + 1, 0);\n delSize.resize(maxIndelSize + 1, 0);\n insSize.resize(maxIndelSize + 1, 0);\n cov.clear();\n }\n };\n\n struct ReadCounts {\n typedef uint16_t TMaxReadLength;\n typedef uint32_t TCountType;\n typedef std::vector<TCountType> TLengthReadCount;\n typedef std::vector<TLengthReadCount> TLenRead12;\n typedef std::vector<uint64_t> TBaseQualitySum;\n typedef std::vector<TBaseQualitySum> TBQRead12;\n typedef ReferenceFeatures::TGCContent TGCContent;\n typedef boost::dynamic_bitset<> TBitSet;\n typedef std::pair<int32_t, int32_t> TStartEndPair;\n typedef std::map<int32_t, TStartEndPair> TBlockRange;\n typedef std::vector<TBlockRange> TGenomicBlockRange;\n typedef std::vector<uint64_t> TMappedChr;\n \n int32_t maxReadLength;\n int32_t maxUMI;\n int64_t secondary;\n int64_t qcfail;\n int64_t dup;\n int64_t supplementary;\n int64_t unmap;\n int64_t forward;\n int64_t reverse;\n int64_t spliced;\n int64_t mapped1;\n int64_t mapped2;\n int64_t haplotagged;\n int64_t mitagged;\n TMappedChr mappedchr;\n TLenRead12 lRc;\n TLenRead12 nCount;\n TLenRead12 aCount;\n TLenRead12 cCount;\n TLenRead12 gCount;\n TLenRead12 tCount;\n TBQRead12 bqCount;\n TGCContent gcContent;\n TBitSet umi;\n TGenomicBlockRange brange;\n\n explicit ReadCounts(uint32_t const n_targets) : maxReadLength(std::numeric_limits<TMaxReadLength>::max()), maxUMI(10000000), secondary(0), qcfail(0), dup(0), supplementary(0), unmap(0), forward(0), reverse(0), spliced(0), mapped1(0), mapped2(0), haplotagged(0), mitagged(0) {\n mappedchr.resize(n_targets, 0);\n lRc.resize(2, TLengthReadCount());\n bqCount.resize(2, TBaseQualitySum());\n aCount.resize(2, TLengthReadCount());\n cCount.resize(2, TLengthReadCount());\n gCount.resize(2, TLengthReadCount());\n tCount.resize(2, TLengthReadCount());\n nCount.resize(2, TLengthReadCount());\n for(uint32_t k = 0; k<2; ++k) {\n\tlRc[k].resize(maxReadLength + 1, 0);\n\tbqCount[k].resize(maxReadLength + 1, 0);\n\taCount[k].resize(maxReadLength + 1, 
0);\n\tcCount[k].resize(maxReadLength + 1, 0);\n\tgCount[k].resize(maxReadLength + 1, 0);\n\ttCount[k].resize(maxReadLength + 1, 0);\n\tnCount[k].resize(maxReadLength + 1, 0);\n }\n gcContent.resize(102, 0);\n }\n };\n\n struct PairCounts {\n typedef uint16_t TMaxInsertSize;\n typedef uint32_t TCountType;\n typedef std::vector<TCountType> TISizePairCount;\n int32_t maxInsertSize;\n int64_t paired;\n int64_t mapped;\n int64_t mappedSameChr;\n int64_t mappedProper;\n int64_t orient[4];\n int64_t totalISizeCount;\n TISizePairCount fPlus;\n TISizePairCount rPlus;\n TISizePairCount fMinus;\n TISizePairCount rMinus;\n \n \n PairCounts() : maxInsertSize(std::numeric_limits<TMaxInsertSize>::max()), paired(0), mapped(0), mappedSameChr(0), mappedProper(0), totalISizeCount(0) {\n orient[0] = 0;\n orient[1] = 0;\n orient[2] = 0;\n orient[3] = 0;\n fPlus.resize(maxInsertSize + 1, 0);\n rPlus.resize(maxInsertSize + 1, 0);\n fMinus.resize(maxInsertSize + 1, 0);\n rMinus.resize(maxInsertSize + 1, 0);\n }\n };\n\n \n struct QualCounts {\n typedef uint8_t TMaxQuality;\n typedef uint32_t TCountType;\n typedef std::vector<TCountType> TQualCount;\n int32_t maxQuality;\n TQualCount qcount;\n\n QualCounts() : maxQuality(std::numeric_limits<TMaxQuality>::max()) {\n qcount.resize(maxQuality + 1, 0);\n }\n };\n \n \n struct ReadGroupStats {\n BaseCounts bc;\n ReadCounts rc;\n PairCounts pc;\n QualCounts qc;\n \n ReadGroupStats(uint32_t const n_targets) : bc(BaseCounts()), rc(ReadCounts(n_targets)), pc(PairCounts()), qc(QualCounts()) {}\n };\n\n\n struct BedCounts {\n typedef double TAvgCov;\n typedef std::vector<TAvgCov> TBpCov;\n typedef boost::unordered_map<std::string, TBpCov> TRgBpMap;\n typedef std::vector<TRgBpMap> TGenomicBp;\n\n typedef std::vector<int64_t> TOnTargetBp;\n typedef boost::unordered_map<std::string, TOnTargetBp> TOnTargetMap;\n typedef std::vector<uint64_t> TGCContent;\n \n int32_t stepsize;\n int32_t onTSize;\n TGenomicBp gCov;\n TOnTargetMap onTarget;\n TGCContent bedGcContent;\n \n BedCounts(int32_t nchr, int32_t s, int32_t vs) : stepsize(s), onTSize(vs) {\n gCov.resize(nchr, TRgBpMap());\n bedGcContent.resize(102, 0);\n }\n };\n \n}\n\n#endif\n" }, { "alpha_fraction": 0.7727771401405334, "alphanum_fraction": 0.7771679759025574, "avg_line_length": 49.61111068725586, "blob_id": "a24802a417a3bd6549298c4eb7f0213fadcb4e7a", "content_id": "9c64e29838f2f2e1e68a48c3fcf7583cd75a260b", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 911, "license_type": "permissive", "max_line_length": 537, "num_lines": 18, "path": "/docs/README.md", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "![Alfred logo](./images/alfred.png)\n\n# Alfred: BAM Statistics, Feature Counting and Feature Annotation\n\n[Alfred](https://github.com/tobiasrausch/alfred) is an efficient and versatile command-line application that computes multi-sample quality control metrics in a read-group aware manner. Alfred supports read counting, feature annotation and haplotype-resolved consensus computation using multiple sequence alignments. Alfred's [companion web application](https://www.gear-genomics.com/alfred/) enables interactive exploration of results. All code is open-source and hosted on [Alfred's GitHub page](https://github.com/tobiasrausch/alfred).\n\nContents:\n\n1. [Installation](/installation/)\n2. [Usage](/cli/)\n3. [Web Application](/webapp/)\n4. 
[FAQ](/faq/)\n\n::: tip\nFor questions, help or feature requests please contact [email protected]\n:::\n\nPlease cite Alfred's URL (https://www.gear-genomics.com/alfred) in publications.\n" }, { "alpha_fraction": 0.7862318754196167, "alphanum_fraction": 0.7862318754196167, "avg_line_length": 38.42856979370117, "blob_id": "9f71a799c9571084b3eceba28eeece8e92a7a97f", "content_id": "a54f6d0d7e98728a2267febc7c19e7c0687bb7ef", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 276, "license_type": "permissive", "max_line_length": 104, "num_lines": 7, "path": "/singularity/README.md", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "You can build an [alfred](https://github.com/tobiasrausch/alfred) singularity container (SIF file) using\n\n`sudo singularity build alfred.sif alfred.def`\n\nOnce you have built the container you can run analysis using\n\n`singularity exec alfred.sif alfred qc -r ref.fa input.bam`\n" }, { "alpha_fraction": 0.7978141903877258, "alphanum_fraction": 0.7978141903877258, "avg_line_length": 35.599998474121094, "blob_id": "2ae807977f17dcb382c0bdf63a9ce9f627b3d637", "content_id": "98af7b57ba055b62e359519e4d2d4037250ed6c3", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 183, "license_type": "permissive", "max_line_length": 131, "num_lines": 5, "path": "/test/README.md", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "## Alfred: Bioconda installation test\n\nSimple test makefile that pulls miniconda, adds the bioconda channel and installs alfred. It then displays the Alfred help message.\n\n`make all`\n" }, { "alpha_fraction": 0.6588055491447449, "alphanum_fraction": 0.6661010980606079, "avg_line_length": 37.77631759643555, "blob_id": "5ba8d48a9748a81eaeb40cc9a3d8f98f50aeb058", "content_id": "c816337811e0d5ea53ca69ab05e24d730ad0fbad", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5894, "license_type": "permissive", "max_line_length": 195, "num_lines": 152, "path": "/src/bed.h", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#ifndef BED_H\n#define BED_H\n\n#include <boost/filesystem.hpp>\n#include <boost/algorithm/string.hpp>\n#include <boost/algorithm/string.hpp>\n#include <boost/iostreams/filtering_streambuf.hpp>\n#include <boost/iostreams/filtering_stream.hpp>\n#include <boost/iostreams/copy.hpp>\n#include <boost/iostreams/filter/gzip.hpp>\n#include <boost/iostreams/device/file.hpp>\n#include <boost/algorithm/string.hpp>\n\n#include <htslib/sam.h>\n\n#include \"util.h\"\n\nnamespace bamstats\n{\n\n template<typename TConfig, typename TGenomicRegions, typename TGeneIds, typename TProteinCoding>\n inline int32_t\n parseBEDAll(TConfig const& c, TGenomicRegions& overlappingRegions, TGeneIds& geneIds, TProteinCoding& pCoding) {\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \" << \"BED feature parsing\" << std::endl;\n \n // Check gzip\n if (!is_gz(c.bedFile)) {\n std::cerr << \"BED file is not gzipped!\" << std::endl;\n return 0;\n }\n\n // Map IDs to integer\n typedef std::map<std::string, int32_t> TIdMap;\n TIdMap idMap;\n\n // Keep track of unique exon IDs\n int32_t eid = 0;\n\n // Parse BED\n std::ifstream file(c.bedFile.string().c_str(), std::ios_base::in | std::ios_base::binary);\n 
boost::iostreams::filtering_streambuf<boost::iostreams::input> dataIn;\n dataIn.push(boost::iostreams::gzip_decompressor());\n dataIn.push(file);\n std::istream instream(&dataIn);\n std::string gline;\n while(std::getline(instream, gline)) {\n if ((gline.size()) && (gline[0] == '#')) continue;\n typedef boost::tokenizer< boost::char_separator<char> > Tokenizer;\n boost::char_separator<char> sep(\" \\t,;\");\n Tokenizer tokens(gline, sep);\n Tokenizer::iterator tokIter = tokens.begin();\n if (tokIter==tokens.end()) {\n\tstd::cerr << \"Empty line in BED file!\" << std::endl;\n\treturn 0;\n }\n std::string chrName=*tokIter++;\n if (c.nchr.find(chrName) == c.nchr.end()) continue;\n int32_t chrid = c.nchr.find(chrName)->second;\n if (tokIter == tokens.end()) {\n\tstd::cerr << \"Corrupted BED file!\" << std::endl;\n\treturn 0;\n }\n int32_t start = boost::lexical_cast<int32_t>(*tokIter++);\n int32_t end = boost::lexical_cast<int32_t>(*tokIter++);\n if (tokIter == tokens.end()) {\n\tstd::cerr << \"Name is missing in BED file!\" << std::endl;\n\treturn 0;\n }\n std::string val = *tokIter++;\n char strand = '*';\n std::string biotype = \"NA\";\n if (tokIter != tokens.end()) {\n\t++tokIter; // skip score\n\tstrand = boost::lexical_cast<char>(*tokIter++);\n\tbiotype = *tokIter++;\n }\n int32_t idval = geneIds.size();\n typename TIdMap::const_iterator idIter = idMap.find(val);\n if (idIter == idMap.end()) {\n\tidMap.insert(std::make_pair(val, idval));\n\tgeneIds.push_back(val);\n\tif (biotype == \"protein_coding\") pCoding.push_back(true);\n\telse pCoding.push_back(false);\n } else idval = idIter->second;\n // BED is 0-based and right-open, no need to convert\n if (start > end) {\n\tstd::cerr << \"Feature start is greater than feature end!\" << std::endl;\n\treturn 0;\n }\n //std::cerr << geneIds[idval] << \"\\t\" << start << \"\\t\" << end << std::endl;\n _insertInterval(overlappingRegions[chrid], start, end, strand, idval, eid++);\n }\n return geneIds.size();\n } \n\n template<typename TConfig, typename TGenomicRegions, typename TGeneIds>\n inline int32_t\n parseBEDAll(TConfig const& c, TGenomicRegions& overlappingRegions, TGeneIds& geneIds) {\n std::vector<bool> pCoding;\n return parseBEDAll(c, overlappingRegions, geneIds, pCoding);\n }\n \n\n template<typename TConfig, typename TGenomicRegions, typename TGeneIds, typename TProteinCoding>\n inline int32_t\n parseBED(TConfig const& c, TGenomicRegions& gRegions, TGeneIds& geneIds, TProteinCoding& pCoding) {\n typedef typename TGenomicRegions::value_type TChromosomeRegions;\n\n // Overlapping intervals for each label\n TGenomicRegions overlappingRegions;\n overlappingRegions.resize(gRegions.size(), TChromosomeRegions());\n parseBEDAll(c, overlappingRegions, geneIds, pCoding);\n\n // Make intervals non-overlapping for each label\n for(uint32_t refIndex = 0; refIndex < overlappingRegions.size(); ++refIndex) {\n // Sort by ID\n std::sort(overlappingRegions[refIndex].begin(), overlappingRegions[refIndex].end(), SortIntervalLabel<IntervalLabel>());\n int32_t runningId = -1;\n char runningStrand = '*';\n typedef boost::icl::interval_set<uint32_t> TIdIntervals;\n typedef typename TIdIntervals::interval_type TIVal;\n TIdIntervals idIntervals;\n for(uint32_t i = 0; i < overlappingRegions[refIndex].size(); ++i) {\n\tif (overlappingRegions[refIndex][i].lid != runningId) {\n\t for(typename TIdIntervals::iterator it = idIntervals.begin(); it != idIntervals.end(); ++it) {\n\t //std::cerr << \"merged\\t\" << geneIds[runningId] << \"\\t\" << it->lower() << \"\\t\" << 
it->upper() << std::endl; \n\t gRegions[refIndex].push_back(IntervalLabel(it->lower(), it->upper(), runningStrand, runningId));\n\t }\n\t idIntervals.clear();\n\t runningId = overlappingRegions[refIndex][i].lid;\n\t runningStrand = overlappingRegions[refIndex][i].strand;\n\t}\n\tidIntervals.insert(TIVal::right_open(overlappingRegions[refIndex][i].start, overlappingRegions[refIndex][i].end));\n }\n // Process last id\n for(typename TIdIntervals::iterator it = idIntervals.begin(); it != idIntervals.end(); ++it) gRegions[refIndex].push_back(IntervalLabel(it->lower(), it->upper(), runningStrand, runningId));\n }\n return geneIds.size();\n }\n\n template<typename TConfig, typename TGenomicRegions, typename TGeneIds>\n inline int32_t\n parseBED(TConfig const& c, TGenomicRegions& gRegions, TGeneIds& geneIds) {\n std::vector<bool> pCoding;\n return parseBED(c, gRegions, geneIds, pCoding);\n }\n \n\n}\n\n#endif\n" }, { "alpha_fraction": 0.7672672867774963, "alphanum_fraction": 0.8033033013343811, "avg_line_length": 46.57143020629883, "blob_id": "9bbd65c47da7d831f5f1ef7baf48f5cb9f6ade85", "content_id": "9c5966422c525e42af946496985bf09d6261aa2f", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 666, "license_type": "permissive", "max_line_length": 222, "num_lines": 14, "path": "/motif/downloadMotifs.sh", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# Raw motifs\ncurl --output jaspar.zip 'http://jaspar.genereg.net/download/CORE/JASPAR2020_CORE_vertebrates_non-redundant_pfms_jaspar.zip'\nunzip jaspar.zip\nrm jaspar.zip\ncat *.jaspar | gzip -c > jaspar.gz\nrm *.jaspar\n\n# Clustered motifs\ncurl --output JASPAR_2020_matrix_clustering_vertebrates_cluster_root_motifs.tf http://folk.uio.no/jamondra/JASPAR_2020_clusters/vertebrates/interactive_trees/JASPAR_2020_matrix_clustering_vertebrates_cluster_root_motifs.tf\npython ./convert.py -f JASPAR_2020_matrix_clustering_vertebrates_cluster_root_motifs.tf > jaspar.cluster\nrm JASPAR_2020_matrix_clustering_vertebrates_cluster_root_motifs.tf\ngzip jaspar.cluster\n" }, { "alpha_fraction": 0.7500905394554138, "alphanum_fraction": 0.7649402618408203, "avg_line_length": 43.17599868774414, "blob_id": "e5350095833478cd55f4d667b91de351c79a2018", "content_id": "64480819b1f651c744edeab273f6c47f0794316b", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 11044, "license_type": "permissive", "max_line_length": 719, "num_lines": 250, "path": "/docs/cli/README.md", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "# Usage\n\nAlfred uses subcommands for [quality control](#alignment-quality-control) ([qc](#alignment-quality-control)), [feature counting](#bam-feature-counting) ([count_dna](#bam-read-counting-for-dna-seq), [count_rna](#bam-read-counting-for-rna-seq), [count_jct](#bam-read-counting-for-rna-seq)), [feature annotation](#bam-feature-annotation) ([annotate](#chip-seq-or-atac-seq-peak-annotation), [tracks](#browser-tracks)), [alignment](#pairwise-sequence-alignment) ([pwalign](#pairwise-sequence-alignment), [consensus](#bam-consensus-computation)) and [haplotype-resolved analysis](#haplotype-specific-bam-files) ([split](#haplotype-specific-bam-files), [ase](#allele-specific-expression)). 
The subcommands are explained below.\n\n## Alignment Quality Control\n\nAlfred supports a command-line interface to run alignment quality control, and a [web application](https://www.gear-genomics.com) can be used to render all QC metrics.\n\n## Command-line Interface for BAM Quality Control\n\nAlfred computes various alignment metrics and summary statistics by read group\n\n```bash\nalfred qc -r <ref.fa> -o qc.tsv.gz <align.bam>\n```\n\nPlotting alignment statistics\n\n```bash\nRscript scripts/stats.R qc.tsv.gz\n```\n\nTo convert all the alignment metrics from column format to row format for readability\n\n```bash\nzgrep ^ME qc.tsv.gz | cut -f 2- | datamash transpose | column -t\n```\n\n## Interactive Quality Control Browser\n\nQuality control metrics can be browsed interactively using the [web front end of Alfred](https://www.gear-genomics.com/alfred).\n\n```bash\nalfred qc -r <ref.fa> -j qc.json.gz -o qc.tsv.gz <align.bam>\n```\n\nThen just upload the qc.json.gz file to the Alfred GUI [https://www.gear-genomics.com/alfred](https://www.gear-genomics.com/alfred). A convenient feature of the web front end is that multiple samples can be uploaded and compared.\n\n\n## BAM Alignment Quality Control for Targeted Sequencing\n\nIf target regions are provided, Alfred computes the average coverage for each target and the on-target rate.\n\n```bash\nalfred qc -r <ref.fa> -b <targets.bed.gz> -o <qc.tsv.gz> <align.bam>\n```\n\nFor instance, for a human whole-exome data set.\n\n```bash\ncd maps/ && Rscript exon.R\nalfred qc -r <hg19.fa> -b maps/exonic.hg19.bed.gz -j qc.json.gz -o qc.tsv.gz <exome.bam>\nRscript scripts/stats.R qc.tsv.gz\n```\n\nAlternatively, one can use the [interactive GUI](https://www.gear-genomics.com/alfred) and upload the json file.\n\n```bash\nalfred qc -r <hg19.fa> -b maps/exonic.hg19.bed.gz -j qc.json.gz -o qc.tsv.gz <exome.bam>\n```\n\n\n## BAM Alignment Quality Control for ATAC-Seq\n\nFor ATAC-Seq data, the insert size distribution should reveal the DNA pitch and a clear nucleosome pattern with a peak for single nucleosomes and dimers. The transcription start site (TSS) enrichment should be >5 for a good ATAC-Seq library and ideally the duplicate rate is <20%, the alignment rate >70% and the standardized SD in coverage >0.3.\n\n```bash\ncd maps/ && Rscript promoter.R\nalfred qc -r <hg19.fa> -b maps/hg19.promoter.bed.gz -o qc.tsv.gz <atac.bam>\nRscript scripts/stats.R qc.tsv.gz\nzgrep ^ME qc.tsv.gz | datamash transpose | egrep \"^Dup|^MappedFraction|^SD|^Enrich\"\n```\n\nATAC-Seq libraries often have a large number of mitochondrial reads depending on the library preparation.\n\n```bash\nzgrep ^CM qc.tsv.gz | egrep \"Mapped|chrM\"\n```\n\nAlternatively, one can use the [interactive GUI](https://www.gear-genomics.com/alfred) and upload the json file.\n\n```bash\nalfred qc -r <hg19.fa> -b maps/hg19.promoter.bed.gz -j qc.json.gz -o qc.tsv.gz <atac.bam>\n```\n\n## BAM Feature Counting\n\nAlfred supports counting reads in overlapping or non-overlapping windows, at predefined intervals in BED format,\nor as gene and transcript counting for RNA-Seq in stranded or unstranded mode using a gtf or gff3 gene annotation\nfile. 
Expression values can be normalized as raw counts, FPKM, or FPKM-UQ values.\n\n## BAM Read Counting for DNA-Seq\n\nFor DNA sequencing, Alfred can be used to calculate the coverage in overlapping or non-overlapping windows or in a given set of intervals.\n\n```bash\nalfred count_dna -o <cov.gz> <align.GRCh37.bam>\n```\n\nTo plot the whole-chromosome coverage profile for chr1-22 and chrX.\n\n```bash\nRscript scripts/rd.R <cov.gz>\n```\n\n## BAM Read Counting for RNA-Seq\n\nAlfred can also assign reads to gene annotation features from a GTF file, such as counting reads by gene or transcript identifier.\n\n```bash\ncd gtf/ && ./downloadGTF.sh\nalfred count_rna -g gtf/Homo_sapiens.GRCh37.75.gtf.gz <align.GRCh37.bam>\n```\n\nAn experimental feature of Alfred is to count splice-junction-supporting reads. This method generates exon-exon junction counts for intra-gene exon-exon junctions, inter-gene exon-exon junctions and completely novel (not annotated) intra-chromosomal junctions.\n\n```bash\nalfred count_jct -g gtf/Homo_sapiens.GRCh37.75.gtf.gz <align.GRCh37.bam>\n```\n\n## BAM Feature Annotation\n\nAlfred supports annotation of ChIP-Seq and ATAC-Seq peaks for neighboring genes or transcription factor binding sites (based on motif alignments). Additionally, browser tracks in UCSC bedgraph format can be computed with configurable resolution.\n\n## ChIP-Seq or ATAC-Seq peak annotation\n\nTo annotate overlapping/neighboring genes up to a distance of 10,000bp:\n\n```bash\nalfred annotate -d 10000 -g gtf/Homo_sapiens.GRCh37.75.gtf.gz <peaks.bed>\n```\n\nThe two output files summarize nearby genes by peak and vice versa (peaks by gene).\n\n\n## Motif annotation\n\nMotif annotation of peaks based on alignment scores of motif-specific position weight matrices can also be obtained. \n\n```bash\ncd motif/ && ./downloadMotifs.sh\nalfred annotate -r <hg19.fa> -m motif/jaspar.gz <peaks.bed>\n```\n\nAlfred further implements functionality to output the exact motif hits in each peak to perform, for instance, transcription factor footprinting.\n\n```bash\nalfred annotate -p hits.gz -r <hg19.fa> -m motif/jaspar.gz <peaks.bed>\n```\n\n\n## Joined motif hits\n\nYou can also obtain all motif hits across a reference genome. For instance, to identify all hits for recombination signal sequences (RSS) you can use:\n\n```bash\ncat <hg19.fa.fai> | cut -f 1,2 | awk '{print $1\"\\t0\\t\"$2;}' > all.chrom.bed\nalfred annotate -m motif/rss.gz -r <hg19.fa> -q 0.9 -p motif.hits.gz all.chrom.bed\n```\n\nTo then join all motif hits of the conserved heptamer sequence (7bp), a spacer sequence (12bp or 23bp), and a conserved nonamer sequence (9bp) you can use:\n\n```bash\nalfred spaced_motif -l 11 -h 13 motif.hits.gz\n```\n\n\n\n## Browser Tracks\n\nNormalized and file size optimized browser tracks are essential to compare peak profiles across samples and upload them quickly in online genome browsers such as the [UCSC Genome Browser](https://genome.ucsc.edu/). Alfred generates browser tracks in [UCSC bedgraph format](https://genome.ucsc.edu/goldenpath/help/bedgraph.html) with configurable resolution. Lower resolution values lead to coarse-grained windows at reduced file size. 
Conversely, high resolution values lead to fine-grained windows at greater file size.\n\n```bash\nalfred tracks -r 0.2 -o track.bedGraph.gz <aligned.bam>\n```\n\n[IGV tools](https://software.broadinstitute.org/software/igv/igvtools) can be used to convert bedgraph files to IGV's proprietary tdf format.\n\n```bash\nigvtools totdf track.bedGraph.gz track.tdf hg19\n```\n\nThis conversion enables comparing dozens of ATAC-Seq or ChIP-Seq samples at greatly improved speed in IGV compared to using the raw BAM files. By default the Alfred tracks command normalizes all input files to 30 million reads so peak heights are directly comparable across samples.\n\n## Pairwise Sequence Alignment\n\nAlfred supports global and local pairwise sequence alignments with configurable match scores and gap/mismatch penalties. Overlap and nested alignments can be achieved using penalty-free leading/trailing gaps. Affine gap penalties with separate gap open and gap extension penalties are supported.\n\n```bash\nalfred pwalign -a align.fa.gz <seq1.fasta> <seq2.fasta>\n```\n\nAll computed pairwise alignments are \"linear\" alignments, that is, the order of sequence nucleotides is preserved. For more complex pairwise alignments involving inversions or duplications you can use [Maze](https://www.gear-genomics.com/maze/).\n\n## BAM Consensus Computation\n\nAlfred supports consensus computation of error-prone long reads using multiple sequence alignment principles. To compute a consensus sequence for a set of long reads at a given alignment position:\n\n```bash\nalfred consensus -f bam -t ont -p chr1:218992200 <ont_pacbio.bam>\n```\n\nThe consensus method generates two output files: a simple fasta file with the consensus sequence and a FASTA align file that shows the multiple sequence alignment used for consensus generation in either horizontal or vertical format. The horizontal format is the classical FASTA align format. The vertical format transposes the horizontal alignment and in addition shows the consensus nucleotide for each alignment column after the vertical bar.\n\nThe consensus command is probably most useful by first separating haplotypes (Alfred's [split](#haplotype-specific-bam-files) subcommand) and then computing consensus sequences independently for each haplotype.\n\n```bash\nalfred split -r <ref.fa> -s NA12878 -p <haplotype1.bam> -q <haplotype2.bam> -v <phased.snps.bcf> <input.bam>\nalfred consensus -c <hap1.fa.gz> -f bam -t ont -p chr1:chr4:500500 <haplotype1.bam>\nalfred consensus -c <hap2.fa.gz> -f bam -t ont -p chr1:chr4:500500 <haplotype2.bam>\n```\n\nTo identify variants, you can then compare the two locally assembled haplotypes using our online dotplot method [Maze](https://www.gear-genomics.com/maze) or align them pairwise against each other using the subcommand [pwalign](#pairwise-sequence-alignment).\n\n\n## Haplotype-specific BAM files\n\nLong reads, Hi-C or novel single-cell sequencing technologies such as Strand-Seq enable (local) haplotyping or phasing of variants. Once such a phased SNP scaffold has been obtained, one can split a BAM file into the corresponding haplotypes.\n\n```bash\nalfred split -r <ref.fa> -s NA12878 -p <haplotype1.bam> -q <haplotype2.bam> -v <phased.snps.bcf> <input.bam>\n```\n\n## Allele-specific expression\n\nAlfred implements methods to generate allele-specific expression tables. 
If the input SNPs are phased Alfred annotates the allele-specific expression haplotype-aware.\n\n```bash\nalfred ase -r <ref.fa> -s NA12878 -v <snps.bcf> -a <ase.tsv> <input.bam>\n```\n\n## Analysis of replication timing by NGS\n\nAlfred implements a method to analyze replication timing using next-generation sequencing (Repli-Seq). The order of BAM files on the command-line must follow the cell-cycle.\n\n```bash\nalfred replication -r <ref.fa> -o outprefix <g1b.bam> <s1.bam> <s2.bam> <s3.bam> <s4.bam> <g2.bam>\n```\n\nThere is a supporting script that plots the tag density for each cell-cycle fraction.\n\n```bash\nRscript scripts/reppattern.R -f outprefix.profile.tsv -r chr12:24000000-26000000\n```\n\nThere is also a script for plotting the replication time along a given chromosome.\n\n```bash\nRscript scripts/reptime.R -f outprefix.reptime.tsv -r chr12\n```\n" }, { "alpha_fraction": 0.5835855603218079, "alphanum_fraction": 0.5996835231781006, "avg_line_length": 32.0363655090332, "blob_id": "52f87afcf133c57a5a1c4e94cb27577635c4dff4", "content_id": "c1eb9fbc14819ae9037bdf5f0660545e68be5db1", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 14536, "license_type": "permissive", "max_line_length": 195, "num_lines": 440, "path": "/src/motif.h", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#ifndef MOTIF_H\n#define MOTIF_H\n\n#include <limits>\n\n#include <boost/multi_array.hpp>\n#include <boost/dynamic_bitset.hpp>\n#include <boost/unordered_map.hpp>\n#include <boost/date_time/posix_time/posix_time.hpp>\n#include <boost/date_time/gregorian/gregorian.hpp>\n#include <boost/progress.hpp>\n#include <boost/iostreams/device/file.hpp>\n#include <boost/iostreams/filtering_stream.hpp>\n#include <boost/iostreams/filter/zlib.hpp>\n#include <boost/iostreams/filter/gzip.hpp>\n\n#include <htslib/sam.h>\n#include <htslib/faidx.h>\n\n#include \"util.h\"\n\nnamespace bamstats\n{\n\n struct Pfm {\n typedef boost::multi_array<int32_t, 2> T2DArray;\n T2DArray matrix;\n\n std::string matrixId;\n std::string symbol;\n };\n\n struct Pwm {\n typedef boost::multi_array<double, 2> T2DArray;\n T2DArray matrix;\n\n std::string matrixId;\n std::string symbol;\n };\n\n inline double\n _minScore(Pwm const& pwm) {\n double ms = 0;\n for(uint32_t j = 0; j < pwm.matrix.shape()[1]; ++j) {\n double minJ = pwm.matrix[0][j];\n for(uint32_t i = 1; i < 4; ++i) {\n\tif (pwm.matrix[i][j] < minJ) minJ = pwm.matrix[i][j];\n }\n ms += minJ;\n }\n return ms;\n }\n\n inline double\n _maxScore(Pwm const& pwm) {\n double ms = 0;\n for(uint32_t j = 0; j < pwm.matrix.shape()[1]; ++j) {\n double maxJ = pwm.matrix[0][j];\n for(uint32_t i = 1; i < 4; ++i) {\n\tif (pwm.matrix[i][j] > maxJ) maxJ = pwm.matrix[i][j];\n }\n ms += maxJ;\n }\n return ms;\n }\n\n inline std::string\n _maxSimpleMotif(Pwm const& pwm) {\n std::string motif;\n for(uint32_t j = 0; j < pwm.matrix.shape()[1]; ++j) {\n double maxJ = pwm.matrix[0][j];\n uint32_t maxI = 0;\n for(uint32_t i = 1; i < 4; ++i) {\n\tif (pwm.matrix[i][j] > maxJ) {\n\t maxJ = pwm.matrix[i][j];\n\t maxI = i;\n\t}\n }\n if (maxI == 0) motif += 'A';\n else if (maxI == 1) motif += 'C';\n else if (maxI == 2) motif += 'G';\n else if (maxI == 3) motif += 'T';\n }\n return motif;\n }\n\n inline void\n scale(Pwm& pwm) {\n double minsc = _minScore(pwm);\n double maxsc = _maxScore(pwm);\n double cols = pwm.matrix.shape()[1];\n for(uint32_t i = 0; i < 4; ++i) {\n for(uint32_t j = 0; j < pwm.matrix.shape()[1]; ++j) 
{\n\tpwm.matrix[i][j] = ((pwm.matrix[i][j] - minsc / cols) / (maxsc - minsc));\n }\n }\n }\n \n inline void\n convert(Pfm const& pfm, Pwm& pwm, std::vector<double> const& bg, double const pc) {\n pwm.matrixId = pfm.matrixId;\n pwm.symbol = pfm.symbol;\n pwm.matrix.resize(boost::extents[4][pfm.matrix.shape()[1]]);\n double totalBg = 0;\n for(uint32_t i = 0; i < 4; ++i) totalBg += bg[i];\n for(uint32_t j = 0; j < pwm.matrix.shape()[1]; ++j) {\n int32_t total = 0;\n for(uint32_t i = 0; i < 4; ++i) total += pfm.matrix[i][j];\n for(uint32_t i = 0; i < 4; ++i) {\n\tpwm.matrix[i][j] = ((double) pfm.matrix[i][j] + bg[i] * pc) / ((double) total + totalBg * pc);\n\tpwm.matrix[i][j] = std::log(pwm.matrix[i][j] / (bg[i] / totalBg)) / log(2);\n }\n }\n }\n\n inline void\n convert(Pfm const& pfm, Pwm& pwm, double const pc) {\n std::vector<double> bg;\n bg.push_back(0.25);\n bg.push_back(0.25);\n bg.push_back(0.25);\n bg.push_back(0.25);\n convert(pfm, pwm, bg, pc);\n }\n\n inline void\n convert(Pfm const& pfm, Pwm& pwm) {\n convert(pfm, pwm, 0.8);\n }\n \n template<typename TPositionMatrix>\n inline void\n revComp(TPositionMatrix const& pfm, TPositionMatrix& out) {\n out.matrixId = pfm.matrixId;\n out.symbol = pfm.symbol;\n out.matrix.resize(boost::extents[4][pfm.matrix.shape()[1]]);\n for(uint32_t i = 0; i < 4; ++i) {\n uint32_t r = out.matrix.shape()[1] - 1;\n for(uint32_t j = 0; j < out.matrix.shape()[1]; ++j, --r) {\n\tout.matrix[(3-i)][r] = pfm.matrix[i][j];\n }\n }\n }\n\n template<typename TConfig, typename TBitSet, typename TMotifHits>\n inline void\n scorePwm(TConfig const& c, char const* seq, TBitSet const& evalPos, Pwm const& inpwm, std::string const& tname, TMotifHits& mh, boost::iostreams::filtering_ostream& dataOut) {\n Pwm pwmFwd(inpwm);\n scale(pwmFwd);\n Pwm pwmRev;\n revComp(inpwm, pwmRev);\n scale(pwmRev);\n int32_t motiflen = pwmFwd.matrix.shape()[1];\n int32_t lastHit = -(motiflen + 1);\n //std::cerr << _minScore(pwmFwd) << ',' << _maxScore(pwmFwd) << std::endl;\n //std::cerr << _minScore(pwmRev) << ',' << _maxScore(pwmRev) << std::endl;\n \n // Parse sequence\n for(uint32_t pos = 0; pos < evalPos.size() - motiflen + 1; ++pos) {\n if (evalPos[pos]) {\n\tstd::string ref = boost::to_upper_copy(std::string(seq + pos, seq + pos + motiflen));\n\tdouble scoreFwd = 0;\n\tdouble scoreRev = 0;\n\tint32_t k = 0;\n\tfor(; k < motiflen; ++k) {\n\t if (evalPos[pos+k]) {\n\t int32_t n = 4;\n\t if (ref[k] == 'A') n = 0;\n\t else if (ref[k] == 'C') n = 1;\n\t else if (ref[k] == 'G') n = 2;\n\t else if (ref[k] == 'T') n = 3;\n\t if (n < 4) {\n\t scoreFwd += pwmFwd.matrix[n][k];\n\t scoreRev += pwmRev.matrix[n][k];\n\t } else break;\n\t } else break;\n\t}\n\tif ((k == motiflen) && ((scoreFwd > c.motifScoreQuantile) || (scoreRev > c.motifScoreQuantile))) {\n\t //std::cerr << \"Genom:\" << ref << \",\" << ref << std::endl;\n\t //std::cerr << \"Query:\" << _maxSimpleMotif(pwmFwd) << \",\" << _maxSimpleMotif(pwmRev) << std::endl;\n\t if ((c.overlappingHits) || (lastHit + motiflen < (int32_t) pos)) {\n\t mh.push_back(pos);\n\t lastHit = pos;\n\t if (c.motifPosOut) {\n\t if (scoreFwd > c.motifScoreQuantile) {\n\t\tdataOut << tname << \"\\t\" << (pos + 1) << \"\\t\" << pos + motiflen << \"\\t\" << inpwm.symbol << \"\\t+\\t\" << scoreFwd << std::endl;\n\t }\n\t if (scoreRev > c.motifScoreQuantile) {\n\t\tdataOut << tname << \"\\t\" << (pos + 1) << \"\\t\" << pos + motiflen << \"\\t\" << inpwm.symbol << \"\\t-\\t\" << scoreRev << std::endl;\n\t }\n\t }\n\t }\n\t}\n }\n }\n }\n\n template<typename TConfig>\n 
inline bool\n parseJasparPfm(TConfig const& c, std::vector<Pfm>& pfms) {\n // Check gzip\n if (!is_gz(c.motifFile)) {\n std::cerr << \"JASPAR file is not gzipped!\" << std::endl;\n return false;\n }\n\n // Parse JASPAR\n std::ifstream file(c.motifFile.string().c_str(), std::ios_base::in | std::ios_base::binary);\n boost::iostreams::filtering_streambuf<boost::iostreams::input> dataIn;\n dataIn.push(boost::iostreams::gzip_decompressor());\n dataIn.push(file);\n std::istream instream(&dataIn);\n std::string gline;\n int32_t acgt = 0;\n int32_t id = 0;\n while(std::getline(instream, gline)) {\n // Header\n if ((gline.size()) && (gline[0] == '>')) {\n\tid = pfms.size();\n\tpfms.resize(id+1);\n\tgline = gline.substr(1);\n\ttypedef boost::tokenizer< boost::char_separator<char> > Tokenizer;\n\tboost::char_separator<char> sep(\" \\t\");\n\tTokenizer tokens(gline, sep);\n\tTokenizer::iterator tokIter = tokens.begin();\n\tif (tokIter != tokens.end()) {\n\t pfms[id].matrixId = *tokIter++;\n\t pfms[id].symbol = \"NA\";\n\t if (tokIter != tokens.end()) {\n\t pfms[id].symbol = *tokIter++;\n\t }\n\t}\n\tacgt = 0;\n } else {\n\tif ((gline.size()) && ((gline[0] == 'A') || (gline[0] == 'C') || (gline[0] == 'G') || (gline[0] == 'T'))) {\n\t // JASPAR format\n\t typedef boost::tokenizer< boost::char_separator<char> > Tokenizer;\n\t boost::char_separator<char> sep(\"[\");\n\t Tokenizer tokens(gline, sep);\n\t Tokenizer::iterator tokIter = tokens.begin();\n\t if ((tokIter!=tokens.end()) && (++tokIter != tokens.end())) {\n\t gline = *tokIter;\n\t boost::char_separator<char> sep2(\"]\");\n\t Tokenizer tokens2(gline, sep2);\n\t Tokenizer::iterator tokIter2 = tokens2.begin();\n\t if (tokIter2 != tokens2.end()) {\n\t gline = *tokIter2;\n\t } else {\n\t std::cerr << \"JASPAR cannot be parsed!\" << std::endl;\n\t return false;\n\t }\n\t } else {\n\t std::cerr << \"JASPAR cannot be parsed!\" << std::endl;\n\t return false;\n\t }\n\t}\n\n\ttypedef boost::tokenizer< boost::char_separator<char> > Tokenizer;\n\tboost::char_separator<char> sep(\" \\t\");\n\tTokenizer tokens(gline, sep);\n\tif (acgt == 0) { \n\t int32_t lenMotif = 0;\n\t for(Tokenizer::iterator tokIter = tokens.begin(); tokIter!=tokens.end(); ++tokIter) ++lenMotif;\n\t pfms[id].matrix.resize(boost::extents[4][lenMotif]);\n\t}\n\tuint32_t col = 0;\n\tfor(Tokenizer::iterator tokIter = tokens.begin(); tokIter!=tokens.end(); ++tokIter, ++col) pfms[id].matrix[acgt][col] = boost::lexical_cast<int32_t>(*tokIter);\n\n\t// Debug code\n\t//if (acgt == 3) {\n\t//std::cout << \">\" << pfms[id].matrixId << ',' << pfms[id].symbol << std::endl;\n\t//for(uint32_t i = 0; i < pfms[id].matrix.shape()[0]; ++i) {\n\t// for(uint32_t j = 0; j < pfms[id].matrix.shape()[1]; ++j) {\n\t// std::cerr << pfms[id].matrix[i][j] << ',';\n\t// }\n\t// std::cerr << std::endl;\n\t//}\n\t//}\n\t\n\t++acgt;\n }\n\n }\n dataIn.pop();\n return true;\n }\n\n template<typename TConfig>\n inline bool\n parseJasparPwm(TConfig const& c, std::vector<Pwm>& pwms) {\n std::vector<Pfm> pfms;\n if (!parseJasparPfm(c, pfms)) return false;\n pwms.resize(pfms.size(), Pwm());\n for(uint32_t i = 0; i < pfms.size(); ++i) {\n convert(pfms[i], pwms[i]);\n }\n return true;\n }\n\n template<typename TConfig, typename TGenomicRegions, typename TMotifIds>\n inline int32_t\n parseJasparAll(TConfig const& c, TGenomicRegions& overlappingRegions, TMotifIds& motifIds) {\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \" << 
\"Motif file parsing\" << std::endl;\n\n // Generate PWMs\n std::vector<Pwm> pwms;\n parseJasparPwm(c, pwms);\n\n // Motif search\n now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \" << \"Motif search\" << std::endl;\n boost::progress_display show_progress(c.nchr.size());\n\n // Output motif positions\n boost::iostreams::filtering_ostream dataOut;\n if (c.motifPosOut) {\n dataOut.push(boost::iostreams::gzip_compressor());\n dataOut.push(boost::iostreams::file_sink(c.outpos.string().c_str(), std::ios_base::out | std::ios_base::binary));\n dataOut << \"chr\\tstart\\tend\\tid\\tstrand\\tquantile\" << std::endl;\n }\n \n // Iterate chromosomes\n faidx_t* fai = fai_load(c.genome.string().c_str());\n char* seq = NULL;\n for(int32_t refIndex=0; refIndex < (int32_t) c.nchr.size(); ++refIndex) {\n ++show_progress;\n\n // Chromosome name and length\n std::string tname = \"NA\";\n for(typename TConfig::TChrMap::const_iterator itChr = c.nchr.begin(); itChr != c.nchr.end(); ++itChr) {\n\tif (refIndex == itChr->second) {\n\t tname = itChr->first;\n\t}\n }\n int32_t seqlen = faidx_seq_len(fai, tname.c_str());\n\n // Pre-process bed file so we can speed-up motif search\n typedef boost::dynamic_bitset<> TBitSet;\n TBitSet evalPos(seqlen, false);\n std::ifstream chrFile(c.infile.string().c_str(), std::ifstream::in);\n if (chrFile.is_open()) {\n\twhile (chrFile.good()) {\n\t std::string chrFromFile;\n\t getline(chrFile, chrFromFile);\n\t typedef boost::tokenizer< boost::char_separator<char> > Tokenizer;\n\t boost::char_separator<char> sep(\" \\t,;\");\n\t Tokenizer tokens(chrFromFile, sep);\n\t Tokenizer::iterator tokIter = tokens.begin();\n\t if (tokIter!=tokens.end()) {\n\t std::string chrName = *tokIter++;\n\t if (c.nchr.find(chrName)->second != refIndex) continue;\n\t int32_t start = boost::lexical_cast<int32_t>(*tokIter++);\n\t int32_t end = boost::lexical_cast<int32_t>(*tokIter++);\n\t std::string name = \"NA\";\n\t if (start >= end) continue; // Bed has right-open intervals\n\t int32_t realstart = std::max(0, start - c.maxDistance);\n int32_t realend = std::min(seqlen, end + c.maxDistance);\n\t for(int32_t i = realstart; i < realend; ++i) evalPos[i] = true;\n\t }\n\t}\n\tchrFile.close();\n }\n\n // Anything to annotate on this chromosome?\n if (evalPos.count()) {\n\tseqlen = -1;\n\tseq = faidx_fetch_seq(fai, tname.c_str(), 0, faidx_seq_len(fai, tname.c_str()) + 1, &seqlen);\n\n\t// Blacklist Ns\n\tfor(int32_t i = 0; i < seqlen; ++i) {\n\t if ((seq[i] == 'n') || (seq[i] == 'N')) evalPos[i] = false;\n\t}\n\t\n\t// Score PWMs\n\tfor(uint32_t i = 0; i<pwms.size(); ++i) {\n\t typedef std::vector<int32_t> TMotifHits;\n\t TMotifHits mh;\n\t scorePwm(c, seq, evalPos, pwms[i], tname, mh, dataOut);\n\n\t int32_t motiflen = pwms[i].matrix.shape()[1];\n\t for(uint32_t hit = 0; hit < mh.size(); ++hit) {\n\t _insertInterval(overlappingRegions[refIndex], mh[hit], mh[hit] + motiflen, '*', i, 0);\n\t }\n\t}\n\n\t// Clean-up\n\tif (seq != NULL) free(seq);\n }\n }\n // Close gzipped motif positions\n if (c.motifPosOut) dataOut.pop();\n \n // Assign Motif Ids\n for(uint32_t i = 0; i<pwms.size(); ++i) motifIds.push_back(pwms[i].symbol);\n\n return motifIds.size();\n }\n\n \n template<typename TConfig, typename TGenomicRegions, typename TMotifIds>\n inline int32_t\n parseJaspar(TConfig const& c, TGenomicRegions& gRegions, TMotifIds& motifIds) {\n typedef typename TGenomicRegions::value_type TChromosomeRegions;\n\n // Overlapping intervals for each 
label\n TGenomicRegions overlappingRegions;\n overlappingRegions.resize(gRegions.size(), TChromosomeRegions());\n parseJasparAll(c, overlappingRegions, motifIds);\n \n // Make intervals non-overlapping for each label\n for(uint32_t refIndex = 0; refIndex < overlappingRegions.size(); ++refIndex) {\n // Sort by ID\n std::sort(overlappingRegions[refIndex].begin(), overlappingRegions[refIndex].end(), SortIntervalLabel<IntervalLabel>());\n int32_t runningId = -1;\n char runningStrand = '*';\n typedef boost::icl::interval_set<uint32_t> TIdIntervals;\n typedef typename TIdIntervals::interval_type TIVal;\n TIdIntervals idIntervals;\n for(uint32_t i = 0; i < overlappingRegions[refIndex].size(); ++i) {\n\tif (overlappingRegions[refIndex][i].lid != runningId) {\n\t for(typename TIdIntervals::iterator it = idIntervals.begin(); it != idIntervals.end(); ++it) {\n\t gRegions[refIndex].push_back(IntervalLabel(it->lower(), it->upper(), runningStrand, runningId));\n\t }\n\t idIntervals.clear();\n\t runningId = overlappingRegions[refIndex][i].lid;\n\t runningStrand = overlappingRegions[refIndex][i].strand;\n\t}\n\tidIntervals.insert(TIVal::right_open(overlappingRegions[refIndex][i].start, overlappingRegions[refIndex][i].end));\n }\n // Process last id\n for(typename TIdIntervals::iterator it = idIntervals.begin(); it != idIntervals.end(); ++it) gRegions[refIndex].push_back(IntervalLabel(it->lower(), it->upper(), runningStrand, runningId));\n }\n\n return motifIds.size();\n }\n \n}\n\n#endif\n" }, { "alpha_fraction": 0.6064925789833069, "alphanum_fraction": 0.6142788529396057, "avg_line_length": 32.25896453857422, "blob_id": "e9ecd062a1b461340786f253dbae7adaa325c05e", "content_id": "862e57864a66a9f69f9c336d24868d0c08e57110", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 8348, "license_type": "permissive", "max_line_length": 176, "num_lines": 251, "path": "/src/bam2match.h", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#ifndef BAM2MATCH_H\n#define BAM2MATCH_H\n\n#include <iostream>\n#include <vector>\n#include <fstream>\n\n#include <boost/program_options/cmdline.hpp>\n#include <boost/program_options/options_description.hpp>\n#include <boost/program_options/parsers.hpp>\n#include <boost/program_options/variables_map.hpp>\n#include <boost/date_time/posix_time/posix_time.hpp>\n#include <boost/date_time/gregorian/gregorian.hpp>\n#include <boost/iostreams/device/file.hpp>\n#include <boost/iostreams/stream.hpp>\n#include <boost/iostreams/stream_buffer.hpp>\n#include <boost/iostreams/filtering_stream.hpp>\n#include <boost/iostreams/filter/zlib.hpp>\n#include <boost/iostreams/filter/gzip.hpp>\n#include <boost/random.hpp>\n#include <boost/generator_iterator.hpp>\n#include <boost/tuple/tuple.hpp>\n#include <boost/filesystem.hpp>\n#include <boost/progress.hpp>\n\n#include <htslib/faidx.h>\n#include <htslib/sam.h>\n#include <htslib/vcf.h>\n\n#include \"util.h\"\n#include \"variants.h\"\n\nnamespace bamstats\n{\n\n struct Bam2MatchConfig {\n uint16_t minMapQual;\n boost::filesystem::path genome;\n boost::filesystem::path outfile;\n boost::filesystem::path bamfile;\n };\n\n template<typename TConfig>\n inline int32_t\n bam2MatchRun(TConfig const& c) {\n\n#ifdef PROFILE\n ProfilerStart(\"alfred.prof\");\n#endif\n\n // Load bam files\n samFile* samfile = sam_open(c.bamfile.string().c_str(), \"r\");\n hts_set_fai_filename(samfile, c.genome.string().c_str());\n hts_idx_t* idx = sam_index_load(samfile, 
c.bamfile.string().c_str());\n bam_hdr_t* hdr = sam_hdr_read(samfile);\n\n // Data out\n boost::iostreams::filtering_ostream dataOut;\n dataOut.push(boost::iostreams::gzip_compressor());\n dataOut.push(boost::iostreams::file_sink(c.outfile.string().c_str(), std::ios_base::out | std::ios_base::binary));\n dataOut << \"chr\\trefstart\\trefend\\tread\\treadstart\\treadend\\tdirection\" << std::endl;\n \n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \" << \"Extract matches\" << std::endl;\n boost::progress_display show_progress(hdr->n_targets);\n\n // Parse BAM alignments\n int32_t refIndex = -1;\n char* seq = NULL;\n faidx_t* fai = fai_load(c.genome.string().c_str());\n bam1_t* rec = bam_init1();\n while (sam_read1(samfile, hdr, rec) >= 0) {\n if (rec->core.flag & (BAM_FQCFAIL | BAM_FDUP | BAM_FUNMAP)) continue;\n if ((rec->core.qual < c.minMapQual) || (rec->core.tid<0)) continue;\n\n // New chromosome?\n if (rec->core.tid != refIndex) {\n\t++show_progress;\n\tif (refIndex != -1) {\n\t if (seq != NULL) free(seq);\n\t}\n\trefIndex = rec->core.tid;\n\t// Load chromosome\n\tint32_t seqlen = -1;\n\tstd::string tname(hdr->target_name[refIndex]);\n\tseq = faidx_fetch_seq(fai, tname.c_str(), 0, hdr->target_len[refIndex], &seqlen);\n }\n \n // Get read sequence\n std::string sequence;\n sequence.resize(rec->core.l_qseq);\n uint8_t* seqptr = bam_get_seq(rec);\n for (int32_t i = 0; i < rec->core.l_qseq; ++i) sequence[i] = \"=ACMGRSVTWYHKDBN\"[bam_seqi(seqptr, i)];\n\t \n // Parse CIGAR\n uint32_t* cigar = bam_get_cigar(rec);\n int32_t gp = rec->core.pos; // Genomic position\n int32_t gpStart = -1; //Match start\n int32_t gpEnd = -1; //Match end\n int32_t sp = 0; // Sequence position\n int32_t seqStart = -1; // Match start\n int32_t seqEnd = -1; // Match end\n for (std::size_t i = 0; i < rec->core.n_cigar; ++i) {\n\tif ((bam_cigar_op(cigar[i]) == BAM_CMATCH) || (bam_cigar_op(cigar[i]) == BAM_CEQUAL) || (bam_cigar_op(cigar[i]) == BAM_CDIFF)) {\n\t if (seqStart == -1) {\n\t seqStart = sp;\n\t gpStart = gp;\n\t }\n\t gp += bam_cigar_oplen(cigar[i]);\n\t sp += bam_cigar_oplen(cigar[i]);\n\t seqEnd = sp;\n\t gpEnd = gp;\n\t} else if (bam_cigar_op(cigar[i]) == BAM_CINS) {\n\t if (seqStart == -1) {\n\t seqStart = sp;\n\t gpStart = gp;\n\t }\n\t sp += bam_cigar_oplen(cigar[i]);\n\t seqEnd = sp;\n\t gpEnd = gp;\n\t} else if (bam_cigar_op(cigar[i]) == BAM_CDEL) {\n\t if (seqStart == -1) {\n\t seqStart = sp;\n\t gpStart = gp;\n\t }\n\t gp += bam_cigar_oplen(cigar[i]);\n\t seqEnd = sp;\n\t gpEnd = gp;\n\t} else if (bam_cigar_op(cigar[i]) == BAM_CSOFT_CLIP) {\n\t sp += bam_cigar_oplen(cigar[i]);\n\t} else if (bam_cigar_op(cigar[i]) == BAM_CREF_SKIP) {\n\t gp += bam_cigar_oplen(cigar[i]);\n\t} else if (bam_cigar_op(cigar[i]) == BAM_CHARD_CLIP) {\n\t sp += bam_cigar_oplen(cigar[i]);\n\t} else {\n\t std::cerr << \"Unknown Cigar options\" << std::endl;\n\t return 1;\n\t}\n }\n std::string dir = \"fwd\";\n if (rec->core.flag & BAM_FREVERSE) {\n\tdir = \"rev\";\n\tint32_t seqTmp = seqStart;\n\tseqStart = sp - seqEnd;\n\tseqEnd = sp - seqTmp;\n }\n dataOut << hdr->target_name[refIndex] << '\\t' << gpStart << '\\t' << gpEnd << '\\t' << bam_get_qname(rec) << '\\t' << seqStart << '\\t' << seqEnd << '\\t' << dir << std::endl;\n }\n // Close output file\n dataOut.pop();\n dataOut.pop();\n \n // Clean-up\n bam_destroy1(rec);\n if (seq != NULL) free(seq);\n fai_destroy(fai);\n \n // Close bam\n bam_hdr_destroy(hdr);\n 
hts_idx_destroy(idx);\n sam_close(samfile);\n \n // End\n now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] Done.\" << std::endl;\n\n#ifdef PROFILE\n ProfilerStop();\n#endif\n \n return 0;\n }\n\n int bam2match(int argc, char **argv) {\n Bam2MatchConfig c;\n\n // Parameter\n boost::program_options::options_description generic(\"Generic options\");\n generic.add_options()\n (\"help,?\", \"show help message\")\n (\"map-qual,m\", boost::program_options::value<unsigned short>(&c.minMapQual)->default_value(0), \"min. mapping quality\")\n (\"reference,r\", boost::program_options::value<boost::filesystem::path>(&c.genome), \"reference fasta file\")\n (\"outfile,o\", boost::program_options::value<boost::filesystem::path>(&c.outfile)->default_value(\"match.gz\"), \"gzipped output file\")\n ;\n\n boost::program_options::options_description hidden(\"Hidden options\");\n hidden.add_options()\n (\"input-file\", boost::program_options::value<boost::filesystem::path>(&c.bamfile), \"input bam file\")\n ;\n\n boost::program_options::positional_options_description pos_args;\n pos_args.add(\"input-file\", -1);\n\n boost::program_options::options_description cmdline_options;\n cmdline_options.add(generic).add(hidden);\n boost::program_options::options_description visible_options;\n visible_options.add(generic);\n boost::program_options::variables_map vm;\n boost::program_options::store(boost::program_options::command_line_parser(argc, argv).options(cmdline_options).positional(pos_args).run(), vm);\n boost::program_options::notify(vm);\n\n \n // Check command line arguments\n if ((vm.count(\"help\")) || (!vm.count(\"input-file\")) || (!vm.count(\"reference\"))) {\n std::cout << std::endl;\n std::cout << \"Usage: alfred \" << argv[0] << \" [OPTIONS] -r <ref.fa> <contig.bam>\" << std::endl;\n std::cout << visible_options << \"\\n\";\n return 1;\n }\n\n // Check input BAM file\n if (vm.count(\"input-file\")) {\n if (!(boost::filesystem::exists(c.bamfile) && boost::filesystem::is_regular_file(c.bamfile) && boost::filesystem::file_size(c.bamfile))) {\n\tstd::cerr << \"Input BAM file is missing: \" << c.bamfile.string() << std::endl;\n\treturn 1;\n }\n samFile* samfile = sam_open(c.bamfile.string().c_str(), \"r\");\n if (samfile == NULL) {\n\tstd::cerr << \"Fail to open file \" << c.bamfile.string() << std::endl;\n\treturn 1;\n }\n hts_idx_t* idx = sam_index_load(samfile, c.bamfile.string().c_str());\n if (idx == NULL) {\n\tstd::cerr << \"Fail to open index for \" << c.bamfile.string() << std::endl;\n\treturn 1;\n }\n bam_hdr_t* hdr = sam_hdr_read(samfile);\n if (hdr == NULL) {\n\tstd::cerr << \"Fail to open header for \" << c.bamfile.string() << std::endl;\n\treturn 1;\n }\n bam_hdr_destroy(hdr);\n hts_idx_destroy(idx);\n sam_close(samfile);\n }\n \n // Show cmd\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \";\n std::cout << \"alfred \";\n for(int i=0; i<argc; ++i) { std::cout << argv[i] << ' '; }\n std::cout << std::endl;\n \n return bam2MatchRun(c);\n}\n\n\n\n}\n\n#endif\n" }, { "alpha_fraction": 0.5570651888847351, "alphanum_fraction": 0.5729813575744629, "avg_line_length": 33.57718276977539, "blob_id": "92e048e5ab742b9918ebe876aba6e11eaf93a690", "content_id": "d6d924412f9aa435f25369920f77fb8717447131", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5152, 
"license_type": "permissive", "max_line_length": 165, "num_lines": 149, "path": "/src/alfred.cpp", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#define _SECURE_SCL 0\n#define _SCL_SECURE_NO_WARNINGS\n#include <iostream>\n#include <vector>\n#include <fstream>\n\n#define BOOST_DISABLE_ASSERTS\n#include <boost/program_options/cmdline.hpp>\n#include <boost/program_options/options_description.hpp>\n#include <boost/program_options/parsers.hpp>\n#include <boost/program_options/variables_map.hpp>\n#include <boost/date_time/posix_time/posix_time.hpp>\n#include <boost/date_time/gregorian/gregorian.hpp>\n#include <boost/tokenizer.hpp>\n#include <boost/filesystem.hpp>\n#include <boost/progress.hpp>\n\n#include <htslib/sam.h>\n#include <htslib/faidx.h>\n\n#ifdef PROFILE\n#include \"gperftools/profiler.h\"\n#endif\n\n\n#include \"version.h\"\n#include \"util.h\"\n#include \"bam2match.h\"\n#include \"bamstats.h\"\n#include \"count_rna.h\"\n#include \"count_dna.h\"\n#include \"count_junction.h\"\n#include \"annotate.h\"\n#include \"tracks.h\"\n#include \"split.h\"\n#include \"ase.h\"\n#include \"qc.h\"\n#include \"consensus.h\"\n#include \"pwalign.h\"\n#include \"spaced.h\"\n#include \"repliseq.h\"\n\nusing namespace bamstats;\n\n\ninline void\nasciiArt() {\n std::cout << \" _ __ _ \" << std::endl;\n std::cout << \" /\\\\ | |/ _| | |\" << std::endl;\n std::cout << \" / \\\\ | | |_ _ __ ___ __| |\" << std::endl;\n std::cout << \" / /\\\\ \\\\ | | _| '__/ _ \\\\/ _` |\" << std::endl;\n std::cout << \" / ____ \\\\| | | | | | __/ (_| |\" << std::endl;\n std::cout << \" /_/ \\\\_\\\\_|_| |_| \\\\___|\\\\__,_|\" << std::endl;\n std::cout << std::endl;\n}\n\ninline void\ndisplayUsage() {\n std::cout << \"Usage: alfred <command> <arguments>\" << std::endl;\n std::cout << std::endl;\n std::cout << \"Commands:\" << std::endl;\n std::cout << std::endl;\n std::cout << \" qc alignment quality control\" << std::endl;\n std::cout << \" count_dna counting DNA reads in windows\" << std::endl;\n std::cout << \" count_rna counting RNA reads in features\" << std::endl;\n std::cout << \" count_jct counting RNA split-reads at exon junctions\" << std::endl;\n std::cout << \" tracks create browser tracks\" << std::endl;\n std::cout << \" annotate annotate peaks\" << std::endl;\n std::cout << \" spaced_motif find spaced motifs\" << std::endl;\n std::cout << \" split split BAM into haplotypes\" << std::endl;\n std::cout << \" consensus consensus computation for error-prone reads\" << std::endl;\n std::cout << \" pwalign pairwise alignment using dynamic programming\" << std::endl;\n std::cout << \" bam2match convert contig alignments in BAM format to pairwise matches\" << std::endl; \n std::cout << \" ase allele-specific expression\" << std::endl;\n std::cout << \" replication replication timing (Repli-Seq)\" << std::endl;\n std::cout << std::endl;\n std::cout << std::endl;\n}\n\n\nint main(int argc, char **argv) {\n if (argc < 2) {\n asciiArt();\n printTitle(\"Alfred\");\n displayUsage();\n return 0;\n }\n \n if ((std::string(argv[1]) == \"version\") || (std::string(argv[1]) == \"--version\") || (std::string(argv[1]) == \"--version-only\") || (std::string(argv[1]) == \"-v\")) {\n std::cout << \"Alfred version: v\" << alfredVersionNumber << std::endl;\n std::cout << \" using Boost: v\" << BOOST_VERSION / 100000 << \".\" << BOOST_VERSION / 100 % 1000 << \".\" << BOOST_VERSION % 100 << std::endl;\n std::cout << \" using HTSlib: v\" << hts_version() << std::endl;\n return 0;\n }\n else if 
((std::string(argv[1]) == \"help\") || (std::string(argv[1]) == \"--help\") || (std::string(argv[1]) == \"-h\") || (std::string(argv[1]) == \"-?\")) {\n printTitle(\"Alfred\");\n displayUsage();\n return 0;\n }\n else if ((std::string(argv[1]) == \"warranty\") || (std::string(argv[1]) == \"--warranty\") || (std::string(argv[1]) == \"-w\")) {\n displayWarranty();\n return 0;\n }\n else if ((std::string(argv[1]) == \"license\") || (std::string(argv[1]) == \"--license\") || (std::string(argv[1]) == \"-l\")) {\n bsd();\n return 0;\n }\n else if ((std::string(argv[1]) == \"qc\")) {\n return qc(argc-1,argv+1);\n }\n else if ((std::string(argv[1]) == \"count_rna\")) {\n return count_rna(argc-1,argv+1);\n }\n else if ((std::string(argv[1]) == \"count_dna\")) {\n return count_dna(argc-1,argv+1);\n }\n else if ((std::string(argv[1]) == \"count_jct\")) {\n return count_junction(argc-1,argv+1);\n }\n else if ((std::string(argv[1]) == \"tracks\")) {\n return tracks(argc-1,argv+1);\n }\n else if ((std::string(argv[1]) == \"annotate\")) {\n return annotate(argc-1,argv+1);\n }\n else if ((std::string(argv[1]) == \"spaced_motif\")) {\n return spaced(argc-1,argv+1);\n }\n else if ((std::string(argv[1]) == \"split\")) {\n return split(argc-1,argv+1);\n }\n else if ((std::string(argv[1]) == \"consensus\")) {\n return consensus(argc-1,argv+1);\n }\n else if ((std::string(argv[1]) == \"pwalign\")) {\n return pwalign(argc-1,argv+1);\n }\n else if ((std::string(argv[1]) == \"bam2match\")) {\n return bam2match(argc-1,argv+1);\n }\n else if ((std::string(argv[1]) == \"ase\")) {\n return ase(argc-1,argv+1);\n }\n else if ((std::string(argv[1]) == \"replication\")) {\n return repliseq(argc-1,argv+1);\n }\n std::cerr << \"Unrecognized command \" << std::string(argv[1]) << std::endl;\n return 1;\n}\n" }, { "alpha_fraction": 0.5877045392990112, "alphanum_fraction": 0.6030638217926025, "avg_line_length": 40.560001373291016, "blob_id": "eb9f7884c10fa35d04ebdea837d3c0c4512ff164", "content_id": "9727e1f161b313e6dc8b521b28afae9728907a82", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 24936, "license_type": "permissive", "max_line_length": 169, "num_lines": 600, "path": "/src/bamstats.h", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#ifndef BAMSTATS_H\n#define BAMSTATS_H\n\n#include <limits>\n\n#include <boost/dynamic_bitset.hpp>\n#include <boost/unordered_map.hpp>\n#include <boost/date_time/posix_time/posix_time.hpp>\n#include <boost/date_time/gregorian/gregorian.hpp>\n#include <boost/progress.hpp>\n#include <boost/iostreams/device/file.hpp>\n#include <boost/iostreams/filtering_stream.hpp>\n#include <boost/iostreams/filter/zlib.hpp>\n#include <boost/iostreams/filter/gzip.hpp>\n\n#include <htslib/sam.h>\n#include <htslib/faidx.h>\n\n#include \"tenX.h\"\n#include \"util.h\"\n#include \"json.h\"\n#include \"tsv.h\"\n#include \"qcstruct.h\"\n\nnamespace bamstats\n{\n\n template<typename TChromosomeRegions, typename TBpCoverage, typename TBedCounts>\n inline void\n _summarizeBedCoverage(TChromosomeRegions const& chrRegions, TBpCoverage const& cov, int32_t refIndex, std::string const& rg, TBedCounts& be) {\n typename BedCounts::TRgBpMap::iterator itChr = be.gCov[refIndex].find(rg);\n typename BedCounts::TOnTargetMap::iterator itOT = be.onTarget.find(rg);\n for(int32_t s = 0; s < (int32_t) itOT->second.size(); ++s) {\n // Avoid over-counting\n typedef boost::dynamic_bitset<> TBitSet;\n TBitSet used(cov.size());\n for(uint32_t i = 0; 
i<chrRegions.size(); ++i) {\n\tint64_t avgCov = 0;\n\tint32_t rStart = std::max(0, chrRegions[i].start - s * be.stepsize);\n\tint32_t rEnd = std::min((int32_t) cov.size(), chrRegions[i].end + s * be.stepsize);\n\tfor(int32_t k = rStart; k < rEnd; ++k) {\n\t if (!used[k]) {\n\t avgCov += cov[k];\n\t used[k] = 1;\n\t }\n\t}\n\titOT->second[s] += avgCov;\n\tif (s == 0) {\n\t if (chrRegions[i].start < chrRegions[i].end) itChr->second[i] = (typename BedCounts::TAvgCov) ( (double) avgCov / (double) (chrRegions[i].end - chrRegions[i].start));\n\t else itChr->second[i] = (typename BedCounts::TAvgCov) (0);\n\t}\n }\n }\n }\n\n template<typename TConfig>\n inline int32_t\n bamStatsRun(TConfig& c) {\n // Load bam file\n samFile* samfile = sam_open(c.bamFile.string().c_str(), \"r\");\n hts_set_fai_filename(samfile, c.genome.string().c_str());\n bam_hdr_t* hdr = sam_hdr_read(samfile);\n\n // Collect reference features\n ReferenceFeatures rf(hdr->n_targets);\n\n // Parse regions from BED file or create one region per chromosome\n if (c.hasRegionFile) {\n if (is_gz(c.regionFile)) {\n\tstd::ifstream file(c.regionFile.string().c_str(), std::ios_base::in | std::ios_base::binary);\n\tboost::iostreams::filtering_streambuf<boost::iostreams::input> dataIn;\n\tdataIn.push(boost::iostreams::gzip_decompressor());\n\tdataIn.push(file);\n\tstd::istream instream(&dataIn);\n\tstd::string line;\n\twhile(std::getline(instream, line)) {\n\t typedef boost::tokenizer< boost::char_separator<char> > Tokenizer;\n\t boost::char_separator<char> sep(\" \\t,;\");\n\t Tokenizer tokens(line, sep);\n\t Tokenizer::iterator tokIter = tokens.begin();\n\t std::string chrName = *tokIter++;\n\t // Map chromosome names to the bam header chromosome IDs\n\t int32_t chrid = bam_name2id(hdr, chrName.c_str());\n\t // Valid ID?\n\t if (chrid >= 0) {\n\t if (tokIter!=tokens.end()) {\n\t int32_t start = boost::lexical_cast<int32_t>(*tokIter++);\n\t int32_t end = boost::lexical_cast<int32_t>(*tokIter++);\n\t rf.gRegions[chrid].push_back(Interval(start, end));\n\t }\n\t }\n\t}\n\tdataIn.pop();\n } else {\n\t// Parse regions from BED file\n\tstd::ifstream bedFile(c.regionFile.string().c_str(), std::ifstream::in);\n\tif (bedFile.is_open()) {\n\t while (bedFile.good()) {\n\t std::string line;\n\t getline(bedFile, line);\n\t typedef boost::tokenizer< boost::char_separator<char> > Tokenizer;\n\t boost::char_separator<char> sep(\" \\t,;\");\n\t Tokenizer tokens(line, sep);\n\t Tokenizer::iterator tokIter = tokens.begin();\n\t std::string chrName = *tokIter++;\n\t // Map chromosome names to the bam header chromosome IDs\n\t int32_t chrid = bam_name2id(hdr, chrName.c_str());\n\t // Valid ID?\n\t if (chrid >= 0) {\n\t if (tokIter!=tokens.end()) {\n\t\tint32_t start = boost::lexical_cast<int32_t>(*tokIter++);\n\t\tint32_t end = boost::lexical_cast<int32_t>(*tokIter++);\n\t\trf.gRegions[chrid].push_back(Interval(start, end));\n\t }\n\t }\n\t }\n\t bedFile.close();\n\t}\n }\n\n // Get total bed size\n for(int32_t refIndex = 0; refIndex < hdr->n_targets; ++refIndex) {\n\ttypedef boost::dynamic_bitset<> TBitSet;\n\tTBitSet bedcovered(hdr->target_len[refIndex]);\n\tfor(uint32_t i = 0; i < rf.gRegions[refIndex].size(); ++i)\n\t for(int32_t k = rf.gRegions[refIndex][i].start; (k < rf.gRegions[refIndex][i].end) && (k < (int32_t) hdr->target_len[refIndex]); ++k) bedcovered[k] = 1;\n\trf.totalBedSize += bedcovered.count();\n }\n }\n \n // Debug code\n //uint32_t rIndex = 0;\n //for(TGenomicRegions::const_iterator itG = rf.gRegions.begin(); itG != rf.gRegions.end(); 
++itG, ++rIndex) {\n //for(TChromosomeRegions::const_iterator itC = itG->begin(); itC != itG->end(); ++itC) {\n //std::cout << rIndex << ',' << hdr->target_name[rIndex] << ',' << itC->start << ',' << itC->end << std::endl;\n //}\n //}\n\n // BED file statistics\n BedCounts be(hdr->n_targets, 25, 20);\n \n // Read group statistics\n typedef std::set<std::string> TRgSet;\n TRgSet rgs;\n getRGs(std::string(hdr->text), rgs);\n if (c.ignoreRG) rgs.insert(\"DefaultLib\");\n if ((c.singleRG) && (rgs.find(c.rgname) == rgs.end())) {\n\tstd::cerr << \"Read group is not present in BAM file: \" << c.rgname << std::endl;\n\treturn 1;\n }\n typedef boost::unordered_map<std::string, ReadGroupStats> TRGMap;\n TRGMap rgMap;\n for(typename TRgSet::const_iterator itRg = rgs.begin(); itRg != rgs.end(); ++itRg) {\n if (((c.ignoreRG) && (*itRg == \"DefaultLib\")) || ((c.singleRG) && (*itRg == c.rgname)) || ((!c.ignoreRG) && (!c.singleRG))) {\n\trgMap.insert(std::make_pair(*itRg, ReadGroupStats(hdr->n_targets)));\n\tfor(int32_t refIndex = 0; refIndex < hdr->n_targets; ++refIndex) {\n\t typename BedCounts::TRgBpMap::iterator itChr = be.gCov[refIndex].insert(std::make_pair(*itRg, typename BedCounts::TBpCov())).first;\n\t itChr->second.resize(rf.gRegions[refIndex].size());\n\t typename BedCounts::TOnTargetMap::iterator itOT = be.onTarget.insert(std::make_pair(*itRg, typename BedCounts::TOnTargetBp())).first;\n\t itOT->second.resize(be.onTSize, 0);\n\t}\n }\n }\n\n // Parse reference and BAM file\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \" << \"BAM file parsing\" << std::endl;\n boost::progress_display show_progress( hdr->n_targets );\n\n // GC- and N-content\n typedef boost::dynamic_bitset<> TBitSet;\n TBitSet nrun;\n TBitSet gcref;\n\n // Find N95 chromosome length\n {\n std::vector<uint32_t> chrlen(hdr->n_targets, 0);\n uint64_t genomelen = 0;\n for(int32_t refIndex = 0; refIndex < hdr->n_targets; ++refIndex) {\n\tchrlen[refIndex] = hdr->target_len[refIndex];\n\tgenomelen += hdr->target_len[refIndex];\n }\n std::sort(chrlen.begin(), chrlen.end(), std::greater<uint32_t>());\n uint64_t cumsum = 0;\n for(uint32_t i = 0; i < chrlen.size(); ++i) {\n\tcumsum += chrlen[i];\n\tif (cumsum > genomelen * c.nXChrLen) {\n\t if (chrlen[i] < c.minChrLen) c.minChrLen = chrlen[i];\n\t break;\n\t}\n }\n }\n\n // Parse genome\n int32_t refIndex = -1;\n char* seq = NULL;\n faidx_t* fai = fai_load(c.genome.string().c_str());\n bam1_t* rec = bam_init1();\n while (sam_read1(samfile, hdr, rec) >= 0) {\n // New chromosome?\n if ((!(rec->core.flag & BAM_FUNMAP)) && (rec->core.tid != refIndex)) {\n\t++show_progress;\n\t\n\t// Summarize bp-level coverage\n\tif (refIndex != -1) {\n\t for(typename TRGMap::iterator itRg = rgMap.begin(); itRg != rgMap.end(); ++itRg) {\n\t if ((c.hasRegionFile) && (!rf.gRegions[refIndex].empty())) _summarizeBedCoverage(rf.gRegions[refIndex], itRg->second.bc.cov, refIndex, itRg->first, be);\n\t for(uint32_t i = 0; i < hdr->target_len[refIndex]; ++i) {\n\t if (itRg->second.bc.cov[i] >= 1) {\n\t\t++itRg->second.bc.nd;\n\t\tif (itRg->second.bc.cov[i] == 1) ++itRg->second.bc.n1;\n\t\tif (itRg->second.bc.cov[i] == 2) ++itRg->second.bc.n2;\n\t }\n\t if (!nrun[i]) ++itRg->second.bc.bpWithCoverage[itRg->second.bc.cov[i]];\n\t }\n\t itRg->second.bc.cov.clear();\n\t }\n\t if (seq != NULL) free(seq);\n\t}\n\trefIndex = rec->core.tid;\n\t\n\t// Load chromosome\n\tint32_t seqlen = -1;\n\tstd::string 
tname(hdr->target_name[refIndex]);\n\tseq = faidx_fetch_seq(fai, tname.c_str(), 0, hdr->target_len[refIndex], &seqlen);\n\n\t// Set N-mask\n\tnrun.clear();\n\tnrun.resize(hdr->target_len[refIndex], false);\n\tgcref.clear();\n\tgcref.resize(hdr->target_len[refIndex], false);\n\trf.referencebp += hdr->target_len[refIndex];\n\tfor(uint32_t i = 0; i < hdr->target_len[refIndex]; ++i) {\n\t if ((seq[i] == 'c') || (seq[i] == 'C') || (seq[i] == 'g') || (seq[i] == 'G')) gcref[i] = 1;\n\t if ((seq[i] == 'n') || (seq[i] == 'N')) {\n\t nrun[i] = 1;\n\t ++rf.ncount;\n\t }\n\t}\n\t// Reference GC\n\trf.chrGC[refIndex].ncount = nrun.count();\n\trf.chrGC[refIndex].gccount = gcref.count();\n\tif ((hdr->target_len[refIndex] > 101) && (hdr->target_len[refIndex] >= c.minChrLen)) {\n\t uint32_t nsum = 0;\n\t uint32_t gcsum = 0;\n\t uint32_t halfwin = 50;\n\t for(uint32_t pos = halfwin; pos < hdr->target_len[refIndex] - halfwin; ++pos) {\n\t if (pos == halfwin) {\n\t for(uint32_t i = pos - halfwin; i<pos+halfwin+1; ++i) {\n\t\tnsum += nrun[i];\n\t\tgcsum += gcref[i];\n\t }\n\t } else {\n\t nsum -= nrun[pos - halfwin - 1];\n\t gcsum -= gcref[pos - halfwin - 1];\n\t nsum += nrun[pos + halfwin];\n\t gcsum += gcref[pos + halfwin];\n\t }\n\t if (!nsum) ++rf.refGcContent[gcsum];\n\t }\n\t if ((c.hasRegionFile) && (!rf.gRegions[refIndex].empty())) {\n\t // Target GC\n\t for(uint32_t k = 0; k < rf.gRegions[refIndex].size(); ++k) {\n\t nsum = 0;\n\t gcsum = 0;\n\t int32_t regstart = std::max(rf.gRegions[refIndex][k].start, (int32_t) halfwin);\n\t int32_t regend = std::min(rf.gRegions[refIndex][k].end, (int32_t) (hdr->target_len[refIndex] - halfwin));\n\t if (regstart < regend) {\n\t\tfor(int32_t pos = regstart; pos < regend; ++pos) {\n\t\t if (pos == regstart) {\n\t\t for(int32_t i = pos - halfwin; i < (int32_t) (pos+halfwin+1); ++i) {\n\t\t nsum += nrun[i];\n\t\t gcsum += gcref[i];\n\t\t }\n\t\t } else {\n\t\t nsum -= nrun[pos - halfwin - 1];\n\t\t gcsum -= gcref[pos - halfwin - 1];\n\t\t nsum += nrun[pos + halfwin];\n\t\t gcsum += gcref[pos + halfwin];\n\t\t }\n\t\t if (!nsum) ++be.bedGcContent[gcsum];\n\t\t}\n\t }\n\t }\n\t }\n\t}\n\t\n\t// Resize coverage vectors\n\tfor(typename TRGMap::iterator itRg = rgMap.begin(); itRg != rgMap.end(); ++itRg) itRg->second.bc.cov.resize(hdr->target_len[refIndex], 0);\n }\n \n // Get the library information\n std::string rG = \"DefaultLib\";\n if (!c.ignoreRG) {\n\tuint8_t *rgptr = bam_aux_get(rec, \"RG\");\n\tif (rgptr) {\n\t char* rg = (char*) (rgptr + 1);\n\t rG = std::string(rg);\n\t}\n\tif ((c.singleRG) && (rG != c.rgname)) continue;\n }\n typename TRGMap::iterator itRg = rgMap.find(rG);\n if (itRg == rgMap.end()) {\n\tstd::cerr << \"Missing read group: \" << rG << std::endl;\n\treturn 1;\n }\n\n // Alignments behind the reference end\n if ((!(rec->core.flag & BAM_FUNMAP)) && (((rec->core.pos >= (int32_t) hdr->target_len[refIndex]) || (lastAlignedPosition(rec) > hdr->target_len[refIndex])))) {\n\tstd::cerr << \"Alignment is past the reference end: \" << hdr->target_name[refIndex] << ':' << rec->core.pos << std::endl;\n\tcontinue;\n }\n\n // Paired counts\n if (rec->core.flag & BAM_FPAIRED) {\n\t++itRg->second.pc.paired;\n\tif (!((rec->core.flag & BAM_FUNMAP) || (rec->core.flag & BAM_FMUNMAP))) {\n\t ++itRg->second.pc.mapped;\n\t if (rec->core.tid == rec->core.mtid) {\n\t ++itRg->second.pc.mappedSameChr;\n\t if (rec->core.flag & BAM_FPROPER_PAIR) ++itRg->second.pc.mappedProper;\n\t }\n\t if (rec->core.pos > rec->core.mpos) {\n\t ++itRg->second.pc.totalISizeCount;\n\t int32_t 
outerISize = rec->core.pos - rec->core.mpos + alignmentLength(rec);\n\t switch(layout(rec)) {\n\t case 0:\n\t ++itRg->second.pc.orient[0];\n\t if (outerISize < itRg->second.pc.maxInsertSize) ++itRg->second.pc.fPlus[outerISize];\n\t else ++itRg->second.pc.fPlus[itRg->second.pc.maxInsertSize];\n\t break;\n\t case 1:\n\t if (outerISize < itRg->second.pc.maxInsertSize) ++itRg->second.pc.fMinus[outerISize];\n\t else ++itRg->second.pc.fMinus[itRg->second.pc.maxInsertSize];\n\t break;\n\t case 2:\n\t ++itRg->second.pc.orient[2];\n\t if (outerISize < itRg->second.pc.maxInsertSize) ++itRg->second.pc.rPlus[outerISize];\n\t else ++itRg->second.pc.rPlus[itRg->second.pc.maxInsertSize];\n\t break;\n\t case 3:\n\t ++itRg->second.pc.orient[3];\n\t if (outerISize < itRg->second.pc.maxInsertSize) ++itRg->second.pc.rMinus[outerISize];\n\t else ++itRg->second.pc.rMinus[itRg->second.pc.maxInsertSize];\n\t break;\n\t default:\n\t break;\n\t }\n\t }\n\t}\n }\n\n // Read counts\n if (rec->core.flag & (BAM_FSECONDARY | BAM_FQCFAIL | BAM_FDUP | BAM_FSUPPLEMENTARY | BAM_FUNMAP)) {\n\tif (rec->core.flag & BAM_FSECONDARY) ++itRg->second.rc.secondary;\n\tif (rec->core.flag & BAM_FQCFAIL) ++itRg->second.rc.qcfail;\n\tif (rec->core.flag & BAM_FDUP) ++itRg->second.rc.dup;\n\tif (rec->core.flag & BAM_FSUPPLEMENTARY) ++itRg->second.rc.supplementary;\n\tif (rec->core.flag & BAM_FUNMAP) {\n\t ++itRg->second.rc.unmap;\n\t} else {\n\t ++itRg->second.rc.mappedchr[refIndex];\n\t}\n\tif (rec->core.flag & BAM_FSECONDARY) {\n\t if (!c.secondary) continue;\n\t // Evaluate secondary alignments\n\t // Sequence and quality strings might be '*' for secondary alignments\n\t} else if (rec->core.flag & BAM_FSUPPLEMENTARY) {\n\t if (!c.supplementary) continue;\n\t // Evaluate supplementary alignments\n\t} else continue;\n }\n ++itRg->second.qc.qcount[(int32_t) rec->core.qual];\n ++itRg->second.rc.mappedchr[refIndex];\n if (rec->core.flag & BAM_FREAD2) ++itRg->second.rc.mapped2;\n else ++itRg->second.rc.mapped1;\n if (rec->core.flag & BAM_FREVERSE) ++itRg->second.rc.reverse;\n else ++itRg->second.rc.forward;\n if (rec->core.l_qseq) {\n\tif (rec->core.l_qseq < itRg->second.rc.maxReadLength) ++itRg->second.rc.lRc[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][rec->core.l_qseq];\n\telse ++itRg->second.rc.lRc[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][itRg->second.rc.maxReadLength];\n } else {\n\tint32_t slen = sequenceLength(rec);\n\tif (slen < itRg->second.rc.maxReadLength) ++itRg->second.rc.lRc[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][slen];\n\telse ++itRg->second.rc.lRc[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][itRg->second.rc.maxReadLength];\n }\n\n // Fetch molecule identifier\n uint8_t* miptr = bam_aux_get(rec, \"MI\");\n if (miptr) {\n\tc.isMitagged = true;\n\t++itRg->second.rc.mitagged;\n\tint32_t mitag = bam_aux2i(miptr);\n\tif ((mitag>=0) && (mitag < itRg->second.rc.maxUMI)) {\n\t // Lazy resize\n\t if (itRg->second.rc.umi.empty()) itRg->second.rc.umi.resize(itRg->second.rc.maxUMI, false);\n\t itRg->second.rc.umi[mitag] = true;\n\t}\n }\n \n // Fetch haplotype tag\n uint8_t* hpptr = bam_aux_get(rec, \"HP\");\n if (hpptr) {\n\tc.isHaplotagged = true;\n\t++itRg->second.rc.haplotagged;\n\n\t// If no phased block assume all in one phased block\n\tint32_t psId = 0;\n\tuint8_t* psptr = bam_aux_get(rec, \"PS\");\n\tif (psptr) psId = bam_aux2i(psptr);\n\tif ((int32_t) itRg->second.rc.brange.size() <= refIndex) itRg->second.rc.brange.resize(refIndex + 1, ReadCounts::TBlockRange());\n\tif 
(itRg->second.rc.brange[refIndex].find(psId) == itRg->second.rc.brange[refIndex].end()) {\n\t itRg->second.rc.brange[refIndex].insert(std::make_pair(psId, std::make_pair(rec->core.pos, lastAlignedPosition(rec))));\n\t} else {\n\t itRg->second.rc.brange[refIndex][psId].first = std::min((int32_t) rec->core.pos, itRg->second.rc.brange[refIndex][psId].first);\n\t itRg->second.rc.brange[refIndex][psId].second = std::max((int32_t) lastAlignedPosition(rec), itRg->second.rc.brange[refIndex][psId].second);\n\t}\n }\n\n // Get the read sequence\n typedef std::vector<uint8_t> TQuality;\n TQuality quality;\n quality.resize(rec->core.l_qseq);\n std::string sequence;\n sequence.resize(rec->core.l_qseq);\n uint8_t* seqptr = bam_get_seq(rec);\n uint8_t* qualptr = bam_get_qual(rec);\n for (int32_t i = 0; i < rec->core.l_qseq; ++i) {\n\tquality[i] = qualptr[i];\n\tsequence[i] = \"=ACMGRSVTWYHKDBN\"[bam_seqi(seqptr, i)];\n\t//char c = 33 + quality[i];\n\tint32_t relpos = i;\n\tif (rec->core.flag & BAM_FREVERSE) {\n\t relpos = rec->core.l_qseq - i - 1;\n\t if (relpos < itRg->second.rc.maxReadLength) {\n\t itRg->second.rc.bqCount[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][relpos] += (uint64_t) quality[i];\n\t if ((sequence[i] == 'N') || (sequence[i] == 'n')) ++itRg->second.rc.nCount[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][relpos];\n\t else if ((sequence[i] == 'A') || (sequence[i] == 'a')) ++itRg->second.rc.tCount[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][relpos];\n\t else if ((sequence[i] == 'C') || (sequence[i] == 'c')) ++itRg->second.rc.gCount[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][relpos];\n\t else if ((sequence[i] == 'G') || (sequence[i] == 'g')) ++itRg->second.rc.cCount[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][relpos];\n\t else if ((sequence[i] == 'T') || (sequence[i] == 't')) ++itRg->second.rc.aCount[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][relpos];\n\t } else {\n\t itRg->second.rc.bqCount[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][itRg->second.rc.maxReadLength] += (uint64_t) quality[i];\n\t if ((sequence[i] == 'N') || (sequence[i] == 'n')) ++itRg->second.rc.nCount[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][itRg->second.rc.maxReadLength];\n\t else if ((sequence[i] == 'A') || (sequence[i] == 'a')) ++itRg->second.rc.tCount[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][itRg->second.rc.maxReadLength];\n\t else if ((sequence[i] == 'C') || (sequence[i] == 'c')) ++itRg->second.rc.gCount[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][itRg->second.rc.maxReadLength];\n\t else if ((sequence[i] == 'G') || (sequence[i] == 'g')) ++itRg->second.rc.cCount[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][itRg->second.rc.maxReadLength];\n\t else if ((sequence[i] == 'T') || (sequence[i] == 't')) ++itRg->second.rc.aCount[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][itRg->second.rc.maxReadLength];\n\t }\n\t} else {\n\t if (relpos < itRg->second.rc.maxReadLength) {\n\t itRg->second.rc.bqCount[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][relpos] += (uint64_t) quality[i];\n\t if ((sequence[i] == 'N') || (sequence[i] == 'n')) ++itRg->second.rc.nCount[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][relpos];\n\t else if ((sequence[i] == 'A') || (sequence[i] == 'a')) ++itRg->second.rc.aCount[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][relpos];\n\t else if ((sequence[i] == 'C') || (sequence[i] == 'c')) ++itRg->second.rc.cCount[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][relpos];\n\t else if ((sequence[i] == 'G') || (sequence[i] == 'g')) 
++itRg->second.rc.gCount[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][relpos];\n\t else if ((sequence[i] == 'T') || (sequence[i] == 't')) ++itRg->second.rc.tCount[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][relpos];\n\t } else {\n\t itRg->second.rc.bqCount[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][itRg->second.rc.maxReadLength] += (uint64_t) quality[i];\n\t if ((sequence[i] == 'N') || (sequence[i] == 'n')) ++itRg->second.rc.nCount[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][itRg->second.rc.maxReadLength];\n\t else if ((sequence[i] == 'A') || (sequence[i] == 'a')) ++itRg->second.rc.aCount[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][itRg->second.rc.maxReadLength];\n\t else if ((sequence[i] == 'C') || (sequence[i] == 'c')) ++itRg->second.rc.cCount[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][itRg->second.rc.maxReadLength];\n\t else if ((sequence[i] == 'G') || (sequence[i] == 'g')) ++itRg->second.rc.gCount[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][itRg->second.rc.maxReadLength];\n\t else if ((sequence[i] == 'T') || (sequence[i] == 't')) ++itRg->second.rc.tCount[(uint32_t) ((rec->core.flag & BAM_FREAD2) != 0)][itRg->second.rc.maxReadLength];\n\t }\n\t}\n }\n\n // Sequence GC content\n {\n\tint32_t halfwin = 50;\n\tint32_t pos = rec->core.pos + 51;\n\tif (rec->core.flag & BAM_FREVERSE) pos = rec->core.pos - 51;\n\tint32_t fragstart = pos - halfwin;\n\tint32_t fragend = pos + halfwin + 1;\n\tif ((fragstart >= 0) && (fragend < (int32_t) hdr->target_len[refIndex])) {\n\t int32_t ncount = 0;\n\t for(int32_t i = fragstart; i < fragend; ++i) {\n\t if (nrun[i]) ++ncount;\n\t }\n\t if (!ncount) {\n\t int32_t gccont = 0;\n\t for(int32_t i = fragstart; i < fragend; ++i) {\n\t if (gcref[i]) ++gccont;\n\t }\n\t ++itRg->second.rc.gcContent[gccont];\n\t }\n\t}\n }\n \n // Get the reference slice\n std::string refslice = boost::to_upper_copy(std::string(seq + rec->core.pos, seq + lastAlignedPosition(rec)));\n \n // Debug \n //std::cout << matchCount << ',' << mismatchCount << ',' << delCount << ',' << insCount << ',' << softClipCount << ',' << hardClipCount << std::endl;\n //std::cout << refslice << std::endl;\n //std::cout << sequence << std::endl;\n\n uint32_t rp = 0; // reference pointer\n uint32_t sp = 0; // sequence pointer\n\n \n // Parse the CIGAR\n uint32_t* cigar = bam_get_cigar(rec);\n bool spliced = false;\n bool softClippedOnce = false;\n bool hardClippedOnce = false;\n for (std::size_t i = 0; i < rec->core.n_cigar; ++i) {\n\tif ((bam_cigar_op(cigar[i]) == BAM_CMATCH) || (bam_cigar_op(cigar[i]) == BAM_CEQUAL) || (bam_cigar_op(cigar[i]) == BAM_CDIFF)) {\n\t // match or mismatch\n\t for(std::size_t k = 0; k<bam_cigar_oplen(cigar[i]);++k) {\n\t if (rec->core.l_qseq) {\n\t if (sequence[sp] == refslice[rp]) ++itRg->second.bc.matchCount;\n\t else ++itRg->second.bc.mismatchCount;\n\t } else {\n\t if (bam_cigar_op(cigar[i]) == BAM_CEQUAL) ++itRg->second.bc.matchCount;\n\t else if (bam_cigar_op(cigar[i]) == BAM_CDIFF) ++itRg->second.bc.mismatchCount;\n\t }\n\t // Count bp-level coverage\n\t if (itRg->second.bc.cov[rec->core.pos + rp] < itRg->second.bc.maxCoverage) ++itRg->second.bc.cov[rec->core.pos + rp];\n\t ++sp;\n\t ++rp;\n\t }\n\t} else if (bam_cigar_op(cigar[i]) == BAM_CDEL) {\n\t ++itRg->second.bc.delCount;\n\t if (rec->core.l_qseq) ++itRg->second.bc.delHomACGTN[homopolymerContext(sequence, sp, 3)];\n\t if (bam_cigar_oplen(cigar[i]) < itRg->second.bc.maxIndelSize) ++itRg->second.bc.delSize[bam_cigar_oplen(cigar[i])];\n\t else 
++itRg->second.bc.delSize[itRg->second.bc.maxIndelSize];\n\t rp += bam_cigar_oplen(cigar[i]);\n\t} else if (bam_cigar_op(cigar[i]) == BAM_CINS) {\n\t ++itRg->second.bc.insCount;\n\t if (rec->core.l_qseq) ++itRg->second.bc.insHomACGTN[homopolymerContext(sequence, sp, 3)];\n\t if (bam_cigar_oplen(cigar[i]) < itRg->second.bc.maxIndelSize) ++itRg->second.bc.insSize[bam_cigar_oplen(cigar[i])];\n\t else ++itRg->second.bc.insSize[itRg->second.bc.maxIndelSize];\n\t sp += bam_cigar_oplen(cigar[i]);\n\t} else if (bam_cigar_op(cigar[i]) == BAM_CSOFT_CLIP) {\n\t if (!softClippedOnce) {\n\t ++itRg->second.bc.softClipCount;\n\t softClippedOnce = true;\n\t }\n\t sp += bam_cigar_oplen(cigar[i]);\n\t} else if(bam_cigar_op(cigar[i]) == BAM_CHARD_CLIP) {\n\t if (!hardClippedOnce) {\n\t ++itRg->second.bc.hardClipCount;\n\t hardClippedOnce = true;\n\t }\n\t} else if (bam_cigar_op(cigar[i]) == BAM_CREF_SKIP) {\n\t if (!spliced) {\n\t ++itRg->second.rc.spliced;\n\t spliced = true;\n\t }\n\t rp += bam_cigar_oplen(cigar[i]);\n\t} else {\n\t std::cerr << \"Unknown Cigar options\" << std::endl;\n\t return 1;\n\t}\n }\n }\n \n // Summarize bp-level coverage\n if (refIndex != -1) {\n for(typename TRGMap::iterator itRg = rgMap.begin(); itRg != rgMap.end(); ++itRg) {\n\tif ((c.hasRegionFile) && (!rf.gRegions[refIndex].empty())) _summarizeBedCoverage(rf.gRegions[refIndex], itRg->second.bc.cov, refIndex, itRg->first, be);\n\tfor(uint32_t i = 0; i < hdr->target_len[refIndex]; ++i) {\n\t if (itRg->second.bc.cov[i] >= 1) {\n\t ++itRg->second.bc.nd;\n\t if (itRg->second.bc.cov[i] == 1) ++itRg->second.bc.n1;\n\t if (itRg->second.bc.cov[i] == 2) ++itRg->second.bc.n2;\n\t }\n\t if (!nrun[i]) ++itRg->second.bc.bpWithCoverage[itRg->second.bc.cov[i]];\n\t}\n\titRg->second.bc.cov.clear();\n }\n if (seq != NULL) free(seq);\n }\n\n // Output\n if (c.hasJsonOut) qcJsonOut(c, hdr, rgMap, be, rf);\n if (c.hasTsvOut) qcTsvOut(c, hdr, rgMap, be, rf);\n \n // clean-up\n bam_destroy1(rec);\n fai_destroy(fai);\n bam_hdr_destroy(hdr);\n sam_close(samfile);\n \n now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] Done.\" << std::endl;\n \n#ifdef PROFILE\n ProfilerStop();\n#endif\n\n\n return 0;\n }\n\n}\n\n#endif\n" }, { "alpha_fraction": 0.6366644501686096, "alphanum_fraction": 0.7888815402984619, "avg_line_length": 57.11538314819336, "blob_id": "432294808de23ceb3956172517ce4ec27482e5fb", "content_id": "8aa5871976e059fa7acd6c921107478868b4ecb2", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1511, "license_type": "permissive", "max_line_length": 265, "num_lines": 26, "path": "/example/split_ase_example.sh", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nSCRIPT=$(readlink -f \"$0\")\nBASEDIR=$(dirname \"$SCRIPT\")\n\necho \"Download reference\"\nwget ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/technical/reference/GRCh38_reference_genome/GRCh38_full_analysis_set_plus_decoy_hla.fa\nsamtools faidx GRCh38_full_analysis_set_plus_decoy_hla.fa\n\necho \"Download phased variants for chr22\"\nwget ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/data_collections/1000_genomes_project/release/20181203_biallelic_SNV/ALL.chr22.shapeit2_integrated_v1a.GRCh38.20181129.phased.vcf.gz\nwget ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/data_collections/1000_genomes_project/release/20181203_biallelic_SNV/ALL.chr22.shapeit2_integrated_v1a.GRCh38.20181129.phased.vcf.gz.tbi\n\necho \"Subset to 
HG00732\"\nbcftools view -O b -o HG00732.bcf -s HG00732 -m2 -M2 -c 1 -C 1 ALL.chr22.shapeit2_integrated_v1a.GRCh38.20181129.phased.vcf.gz chr22:20000000-22000000\nbcftools index HG00732.bcf\n\necho \"Fetch HG00732 BAM slice\"\nsamtools view -b -T GRCh38_full_analysis_set_plus_decoy_hla.fa ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/data_collections/1000_genomes_project/data/PUR/HG00732/alignment/HG00732.alt_bwamem_GRCh38DH.20150718.PUR.low_coverage.cram chr22:20000000-22000000 > HG00732.bam\nsamtools index HG00732.bam\n\necho \"Split BAM\"\n${BASEDIR}/../src/alfred split -r GRCh38_full_analysis_set_plus_decoy_hla.fa -s HG00732 -v HG00732.bcf HG00732.bam\n\necho \"Create allele-specific counts\"\n${BASEDIR}/../src/alfred ase -r GRCh38_full_analysis_set_plus_decoy_hla.fa -s HG00732 -v HG00732.bcf -p -f HG00732.bam\n" }, { "alpha_fraction": 0.6136964559555054, "alphanum_fraction": 0.6257774233818054, "avg_line_length": 34.060150146484375, "blob_id": "43d6040edd2d7e03ea09b962a80f94b0ac7f65fa", "content_id": "427a34b2b938bfd20a22337dff5968faa21dbb14", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 13989, "license_type": "permissive", "max_line_length": 192, "num_lines": 399, "path": "/src/consensus.h", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#ifndef CONSENSUS_H\n#define CONSENSUS_H\n\n#include <boost/program_options/cmdline.hpp>\n#include <boost/program_options/options_description.hpp>\n#include <boost/program_options/parsers.hpp>\n#include <boost/program_options/variables_map.hpp>\n#include <boost/filesystem.hpp>\n#include <boost/tokenizer.hpp>\n#include <iostream>\n#include <vector>\n#include <htslib/vcf.h>\n#include <htslib/sam.h>\n#include <math.h>\n#include <stdio.h>\n\n#include <htslib/sam.h>\n#include <htslib/faidx.h>\n\n#include \"util.h\"\n#include \"align.h\"\n#include \"msa.h\"\n\n\nnamespace bamstats {\n\n\nstruct ConfigConsensus {\n bool secondary;\n bool trimreads;\n uint16_t minMapQual;\n uint32_t window;\n int32_t gapopen;\n int32_t gapext;\n int32_t match;\n int32_t mismatch;\n float fractionCalled;\n std::string position;\n std::string format;\n std::string seqtype;\n std::string outformat;\n DnaScore<int32_t> aliscore;\n boost::filesystem::path alignment;\n boost::filesystem::path consensus;\n boost::filesystem::path inputfile;\n};\n\n\ntemplate<typename TConfig>\ninline void\n_loadFastaReads(TConfig const& c, std::vector<std::string>& rs) {\n // Load read set\n faidx_t* fai = fai_load(c.inputfile.string().c_str());\n rs.resize(faidx_nseq(fai));\n for(int32_t refIndex = 0; refIndex < faidx_nseq(fai); ++refIndex) {\n std::string rn = faidx_iseq(fai, refIndex);\n int32_t seqlen = -1;\n char* seq = faidx_fetch_seq(fai, rn.c_str(), 0, faidx_seq_len(fai, rn.c_str()), &seqlen);\n std::cout << \"Read name: \" << rn << \", Length: \" << seqlen << std::endl;\n rs[refIndex] = std::string(seq);\n free(seq);\n }\n fai_destroy(fai);\n}\n\ninline bool\nfindReadTrim(bam1_t const* rec, int32_t const start, int32_t const end, int32_t& leftPos, int32_t& rightPos) {\n int32_t rp = rec->core.pos; // reference pointer\n int32_t sp = 0; // sequence pointer\n leftPos = -1;\n rightPos = -1;\n\n // Parse the CIGAR\n if (start < end) {\n uint32_t* cigar = bam_get_cigar(rec);\n for (std::size_t i = 0; i < rec->core.n_cigar; ++i) {\n if ((bam_cigar_op(cigar[i]) == BAM_CMATCH) || (bam_cigar_op(cigar[i]) == BAM_CEQUAL) || (bam_cigar_op(cigar[i]) == BAM_CDIFF)) {\n\t// match or mismatch\n\tfor(std::size_t k 
= 0; k<bam_cigar_oplen(cigar[i]);++k) {\n\t ++sp;\n\t ++rp;\n\t if ((leftPos == -1) && (rp >= start)) leftPos = sp;\n\t if ((rightPos == -1) && (rp >= end)) {\n\t rightPos = sp;\n\t return true;\n\t }\n\t}\n } else {\n\tif (bam_cigar_op(cigar[i]) == BAM_CDEL) {\n\t rp += bam_cigar_oplen(cigar[i]);\n\t} else if (bam_cigar_op(cigar[i]) == BAM_CINS) {\n\t sp += bam_cigar_oplen(cigar[i]);\n\t} else if (bam_cigar_op(cigar[i]) == BAM_CSOFT_CLIP) {\n\t sp += bam_cigar_oplen(cigar[i]);\n\t} else if(bam_cigar_op(cigar[i]) == BAM_CHARD_CLIP) {\n\t // Nothing\n\t} else if (bam_cigar_op(cigar[i]) == BAM_CREF_SKIP) {\n\t rp += bam_cigar_oplen(cigar[i]);\n\t} else {\n\t std::cerr << \"Unknown Cigar options\" << std::endl;\n\t return 1;\n\t}\n\tif ((leftPos == -1) && (rp >= start)) leftPos = sp;\n\tif ((rightPos == -1) && (rp >= end)) {\n\t rightPos = sp;\n\t return true;\n\t}\n }\n }\n }\n return false;\n}\n \ntemplate<typename TConfig>\ninline bool\n_loadBamReads(TConfig const& c, std::vector<std::string>& rs) {\n if (!(boost::filesystem::exists(c.inputfile) && boost::filesystem::is_regular_file(c.inputfile) && boost::filesystem::file_size(c.inputfile))) {\n std::cerr << \"Alignment file is missing: \" << c.inputfile.string() << std::endl;\n return false;\n }\n samFile* samfile = sam_open(c.inputfile.string().c_str(), \"r\");\n if (samfile == NULL) {\n std::cerr << \"Fail to open file \" << c.inputfile.string() << std::endl;\n return false;\n }\n hts_idx_t* idx = sam_index_load(samfile, c.inputfile.string().c_str());\n if (idx == NULL) {\n std::cerr << \"Fail to open index for \" << c.inputfile.string() << std::endl;\n return false;\n }\n bam_hdr_t* hdr = sam_hdr_read(samfile);\n if (hdr == NULL) {\n std::cerr << \"Fail to open header for \" << c.inputfile.string() << std::endl;\n return false;\n }\n\n // Parse position\n typedef boost::tokenizer< boost::char_separator<char> > Tokenizer;\n boost::char_separator<char> sep(\":\");\n Tokenizer tokens(c.position, sep);\n Tokenizer::iterator tokIter = tokens.begin();\n bool posError = true;\n int32_t refIndex = -1;\n int32_t pos = -1;\n if (tokIter!=tokens.end()) {\n std::string chrName = *tokIter++;\n refIndex = bam_name2id(hdr, chrName.c_str());\n if (refIndex >= 0) {\n pos = boost::lexical_cast<int32_t>(*tokIter++);\n if ((pos >= 0) && (pos < (int32_t) hdr->target_len[refIndex])) posError = false;\n }\n }\n if (posError) {\n std::cerr << \"Position needs to be in the format chr:pos\" << std::endl;\n return false;\n }\n\n // Fetch reads\n std::set<unsigned> read_set;\n typedef boost::unordered_map<unsigned, bool> TMissingReads;\n TMissingReads missing_reads;\n {\n std::cout << \"Primary alignments\" << std::endl;\n hts_itr_t* iter = sam_itr_queryi(idx, refIndex, pos, pos+1);\n bam1_t* rec = bam_init1();\n while (sam_itr_next(samfile, iter, rec) >= 0) {\n if (rec->core.flag & (BAM_FQCFAIL | BAM_FDUP | BAM_FUNMAP)) continue;\n if (rec->core.qual < c.minMapQual) continue;\n\n // Secondary Alignments\n if (rec->core.flag & BAM_FSECONDARY) {\n\tif (c.secondary) {\n\t // No sequence information\n\t unsigned seed = hash_string(bam_get_qname(rec));\n\t if (read_set.find(seed) == read_set.end()) missing_reads[seed] = (rec->core.flag & BAM_FREVERSE);\n\t}\n } else {\n\t// Overlaps a minimal window?\n\tif (rec->core.pos + (int32_t) c.window <= pos) {\n\t if (rec->core.pos + alignmentLength(rec) >= pos + c.window) {\n\t unsigned seed = hash_string(bam_get_qname(rec));\n\t if (read_set.find(seed) == read_set.end()) {\n\t // Any sequence information?\n\t if 
(rec->core.l_qseq > 1) {\n\t\tstd::string sequence;\n\t\tsequence.resize(rec->core.l_qseq);\n\t\tuint8_t* seqptr = bam_get_seq(rec);\n\t\tfor (int i = 0; i < rec->core.l_qseq; ++i) sequence[i] = \"=ACMGRSVTWYHKDBN\"[bam_seqi(seqptr, i)];\n\t\tstd::cout << \"Read name: \" << bam_get_qname(rec) << \", Length: \" << rec->core.l_qseq << std::endl;\n\t\tif (c.trimreads) {\n\t\t int32_t leftPos = -1;\n\t\t int32_t rightPos = -1;\n\t\t bool success = findReadTrim(rec, pos - c.window, pos+c.window, leftPos, rightPos);\n\t\t if (success) rs.push_back(sequence.substr(leftPos, rightPos - leftPos));\n\t\t} else rs.push_back(sequence);\n\t\tread_set.insert(seed);\n\t } else {\n\t\tmissing_reads[seed] = (rec->core.flag & BAM_FREVERSE);\n\t }\n\t }\n\t }\n\t}\n }\n }\n // Clean-up\n bam_destroy1(rec);\n hts_itr_destroy(iter);\n }\n \n // Any missing reads?\n if (!missing_reads.empty()) {\n std::cout << \"Secondary alignments\" << std::endl;\n int32_t regstart = std::max((int32_t) pos - 100000, 0);\n int32_t regend = std::min((int32_t) pos + 100000, (int32_t) hdr->target_len[refIndex]);\n hts_itr_t* iter = sam_itr_queryi(idx, refIndex, regstart, regend);\n bam1_t* rec = bam_init1();\n while (sam_itr_next(samfile, iter, rec) >= 0) {\n if (rec->core.flag & (BAM_FQCFAIL | BAM_FDUP | BAM_FUNMAP)) continue;\n if (rec->core.qual < c.minMapQual) continue;\n unsigned seed = hash_string(bam_get_qname(rec));\n if (missing_reads.find(seed) != missing_reads.end()) {\n\tif (read_set.find(seed) == read_set.end()) {\n\t // Any sequence information?\n\t if (rec->core.l_qseq > 1) {\n\t std::string sequence;\n\t sequence.resize(rec->core.l_qseq);\n\t uint8_t* seqptr = bam_get_seq(rec);\n\t for (int i = 0; i < rec->core.l_qseq; ++i) sequence[i] = \"=ACMGRSVTWYHKDBN\"[bam_seqi(seqptr, i)];\n\t std::cout << \"Read name: \" << bam_get_qname(rec) << \", Length: \" << rec->core.l_qseq << std::endl;\n\n\t // Check alignment direction\n\t if ( (rec->core.flag & BAM_FREVERSE) == (missing_reads[seed]) ) {\n\t if (c.trimreads) {\n\t\tint32_t leftPos = -1;\n\t\tint32_t rightPos = -1;\n\t\tbool success = findReadTrim(rec, pos - c.window, pos+c.window, leftPos, rightPos);\n\t\tif (success) rs.push_back(sequence.substr(leftPos, rightPos - leftPos));\n\t } else rs.push_back(sequence);\n\t } else {\n\t if (c.trimreads) {\n\t\t// Tricky, todo\n\t } else {\n\t\treverseComplement(sequence);\n\t\trs.push_back(sequence);\n\t }\n\t }\n\t read_set.insert(seed);\n\t }\n\t}\n }\n }\n // Clean-up\n bam_destroy1(rec);\n hts_itr_destroy(iter);\n }\n std::cout << \"Number of reads: \" << rs.size() << std::endl;\n \n bam_hdr_destroy(hdr);\n hts_idx_destroy(idx);\n sam_close(samfile);\n return true;\n}\n\n\nint consensus(int argc, char **argv) {\n ConfigConsensus c;\n\n // Parameter\n boost::program_options::options_description generic(\"Generic options\");\n generic.add_options()\n (\"help,?\", \"show help message\")\n (\"format,f\", boost::program_options::value<std::string>(&c.format)->default_value(\"bam\"), \"input format [bam|fasta]\")\n (\"called,d\", boost::program_options::value<float>(&c.fractionCalled)->default_value(0.5), \"fraction of reads required for consensus\")\n (\"seqtype,t\", boost::program_options::value<std::string>(&c.seqtype)->default_value(\"ill\"), \"seq. type [ill|ont|pacbio|custom]\")\n ;\n\n boost::program_options::options_description bamopt(\"BAM input options\");\n bamopt.add_options()\n (\"mapqual,q\", boost::program_options::value<uint16_t>(&c.minMapQual)->default_value(10), \"min. 
mapping quality\")\n (\"position,p\", boost::program_options::value<std::string>(&c.position)->default_value(\"chr4:500500\"), \"position to generate consensus\")\n (\"window,w\", boost::program_options::value<uint32_t>(&c.window)->default_value(5), \"window around position to fetch reads\")\n (\"secondary,s\", \"consider secondary alignments\")\n (\"trimreads,r\", \"trim reads to window\")\n ;\n \n boost::program_options::options_description alignment(\"Alignment scoring options for 'custom' sequencing type\");\n alignment.add_options()\n (\"gapopen,g\", boost::program_options::value<int32_t>(&c.gapopen)->default_value(-10), \"gap open\")\n (\"gapext,e\", boost::program_options::value<int32_t>(&c.gapext)->default_value(-1), \"gap extension\")\n (\"match,m\", boost::program_options::value<int32_t>(&c.match)->default_value(5), \"match\")\n (\"mismatch,n\", boost::program_options::value<int32_t>(&c.mismatch)->default_value(-4), \"mismatch\")\n ;\n\n boost::program_options::options_description otp(\"Output options\");\n otp.add_options()\n (\"outformat,u\", boost::program_options::value<std::string>(&c.outformat)->default_value(\"v\"), \"output format [v|h]\")\n (\"alignment,a\", boost::program_options::value<boost::filesystem::path>(&c.alignment)->default_value(\"al.fa.gz\"), \"vertical/horizontal alignment\")\n (\"consensus,c\", boost::program_options::value<boost::filesystem::path>(&c.consensus)->default_value(\"cs.fa.gz\"), \"consensus\")\n ;\n \n boost::program_options::options_description hidden(\"Hidden options\");\n hidden.add_options()\n (\"input-file\", boost::program_options::value<boost::filesystem::path>(&c.inputfile), \"input bam/fasta file\")\n ;\n\n boost::program_options::positional_options_description pos_args;\n pos_args.add(\"input-file\", -1);\n\n boost::program_options::options_description cmdline_options;\n cmdline_options.add(generic).add(bamopt).add(alignment).add(otp).add(hidden);\n boost::program_options::options_description visible_options;\n visible_options.add(generic).add(bamopt).add(alignment).add(otp);\n boost::program_options::variables_map vm;\n boost::program_options::store(boost::program_options::command_line_parser(argc, argv).options(cmdline_options).positional(pos_args).run(), vm);\n boost::program_options::notify(vm);\n\n // Check command line arguments\n if ((vm.count(\"help\")) || (!vm.count(\"input-file\"))) {\n std::cout << \"Usage: alfred \" << argv[0] << \" [OPTIONS] <input.bam|input.fa.gz>\" << std::endl;\n std::cout << visible_options << \"\\n\";\n return 1;\n }\n\n // Secondary alignments\n if (vm.count(\"secondary\")) c.secondary = true;\n else c.secondary = false;\n\n // Trim reads?\n if (vm.count(\"trimreads\")) c.trimreads = true;\n else c.trimreads = false;\n\n // Set alignment score\n if (c.seqtype == \"ill\") {\n c.aliscore = DnaScore<int>(5, -4, -10, -1);\n c.window = 5;\n } else if (c.seqtype == \"ont\") {\n c.aliscore = DnaScore<int>(3, -2, -3, -1);\n c.window = 250;\n } else if (c.seqtype == \"pacbio\") {\n c.aliscore = DnaScore<int>(3, -2, -3, -1);\n c.window = 250;\n } else {\n c.aliscore = DnaScore<int32_t>(c.match, c.mismatch, c.gapopen, c.gapext);\n }\n\n // Show cmd\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \";\n std::cout << \"alfred \";\n for(int i=0; i<argc; ++i) { std::cout << argv[i] << ' '; }\n std::cout << std::endl;\n\n // Some status information\n std::cout << \"Input format: \" << c.format << std::endl;\n 
std::cout << \"Sequencing type: \" << c.seqtype << std::endl;\n std::cout << \"Alignment scoring (match: \" << c.aliscore.match << \", mismatch: \" << c.aliscore.mismatch << \", gapopen: \" << c.aliscore.go << \", gapext: \" << c.aliscore.ge << \")\" << std::endl;\n std::cout << \"Window: \" << c.window << std::endl;\n\n // Load reads for consensus\n typedef std::vector<std::string> TReads;\n TReads rs;\n if (vm.count(\"input-file\")) {\n if (c.format == \"fasta\") {\n _loadFastaReads(c, rs);\n } else {\n bool rtval = _loadBamReads(c, rs);\n if (!rtval) return 1;\n }\n }\n\n // Any reads?\n if (rs.empty()) {\n std::cerr << \"No reads for consensus found!\" << std::endl;\n return 1;\n }\n\n // Generate Consensus\n std::string consensus;\n msa(c, rs, consensus);\n\n // Output consensus\n boost::iostreams::filtering_ostream rcfile;\n rcfile.push(boost::iostreams::gzip_compressor());\n rcfile.push(boost::iostreams::file_sink(c.consensus.c_str(), std::ios_base::out | std::ios_base::binary));\n rcfile << \">Consensus\" << std::endl;\n rcfile << consensus << std::endl;\n rcfile.pop();\n\n // Done\n now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] Done.\" << std::endl;\n \n return 0;\n}\n\n\n}\n\n#endif\n" }, { "alpha_fraction": 0.7370222210884094, "alphanum_fraction": 0.7393040657043457, "avg_line_length": 29.224138259887695, "blob_id": "d8e5c150df25b7686587080746c3dc2fae8f2574", "content_id": "2cf50a1ef579f2258c796421e023fe823fcb5e9d", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1753, "license_type": "permissive", "max_line_length": 391, "num_lines": 58, "path": "/docs/installation/README.md", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "# Installation\n\nAlfred is available as a [Bioconda package](https://anaconda.org/bioconda/alfred), as a pre-compiled statically linked binary from [Alfred's github release page](https://github.com/tobiasrausch/alfred/releases/) or as a minimal [Docker container](https://hub.docker.com/r/trausch/alfred/). All code is open-source and hosted on [Alfred's GitHub page](https://github.com/tobiasrausch/alfred).\n\n## Installation from Source\n\nTo build Alfred from source you need some build essentials and the Boost libraries, i.e. 
for Ubuntu:\n\n```bash\napt install \\\n build-essential g++ \\\n cmake \\\n git-all \\\n liblzma-dev \\\n zlib1g-dev \\\n libbz2-dev \\\n liblzma-dev \\\n libboost-date-time-dev \\\n libboost-program-options-dev \\\n libboost-system-dev \\\n libboost-filesystem-dev \\\n libboost-iostreams-dev\n```\n\nOnce you have installed these system libraries you can compile and link Alfred.\n\n```bash\ngit clone --recursive https://github.com/tobiasrausch/alfred.git\ncd alfred/\nmake all\nmake install\n./bin/alfred -h\n```\n\n## Custom Boost installation directory\n\nAlfred requires Boost and you can install Boost locally using\n\n```bash\ngit clone --recursive https://github.com/boostorg/boost.git\ncd boost/\n./bootstrap.sh --prefix=`pwd` --without-icu --with-libraries=iostreams,filesystem,system,program_options,date_time\n./b2\n./b2 headers\ncd ..\n```\n\nYou can then specify the custom Boost installation directory (i.e., `/opt/boost` below) using\n\n```bash\n# modify the following line accordingly\nBOOSTROOT=/opt/boost\ngit clone --recursive https://github.com/tobiasrausch/alfred.git\ncd alfred/\nmake CMDCXXFLAGS=\"-isystem $BOOSTROOT\" CMDLDFLAGS=\"-L$BOOSTROOT/stage/lib -Wl,-rpath,$BOOSTROOT/stage/lib\" all\nmake install\n./bin/alfred -h\n```\n" }, { "alpha_fraction": 0.5641529560089111, "alphanum_fraction": 0.5826026201248169, "avg_line_length": 29.265392303466797, "blob_id": "d8fceffc7cd19899946956a1987ec0dc10515976", "content_id": "692271ae78944432f980b0c7984e09039fc77c58", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 14255, "license_type": "permissive", "max_line_length": 253, "num_lines": 471, "path": "/src/util.h", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#ifndef UTIL_H\n#define UTIL_H\n\n#include <boost/filesystem.hpp>\n#include <boost/algorithm/string.hpp>\n#include <boost/algorithm/string.hpp>\n#include <boost/iostreams/filtering_streambuf.hpp>\n#include <boost/iostreams/filtering_stream.hpp>\n#include <boost/iostreams/copy.hpp>\n#include <boost/iostreams/filter/gzip.hpp>\n#include <boost/iostreams/device/file.hpp>\n#include <boost/math/distributions/binomial.hpp>\n\n#include <htslib/sam.h>\n\n\nnamespace bamstats\n{\n\n struct Interval {\n int32_t start;\n int32_t end;\n \n Interval(int32_t s, int32_t e) : start(s), end(e) {}\n };\n\n\n inline double\n binomTest(uint32_t x, uint32_t n, double p) {\n boost::math::binomial binomialdist(n, p);\n double cutoff = pdf(binomialdist, x);\n double pval = 0.0;\n for(uint32_t k = 0; k <= n; ++k) {\n double p = pdf(binomialdist, k);\n if (p <= cutoff) pval +=p;\n }\n return pval;\n }\n\n\n \n inline unsigned hash_string(const char *s) {\n unsigned h = 37;\n while (*s) {\n h = (h * 54059) ^ (s[0] * 76963);\n s++;\n }\n return h;\n }\n\n\n struct IntervalLabel {\n int32_t start;\n int32_t end;\n char strand;\n int32_t lid;\n\n explicit IntervalLabel(int32_t s) : start(s), end(s+1), strand('*'), lid(-1) {}\n IntervalLabel(int32_t s, int32_t e, char t, int32_t l) : start(s), end(e), strand(t), lid(l) {}\n };\n\n struct IntervalLabelId {\n int32_t start;\n int32_t end;\n char strand;\n int32_t lid;\n int32_t eid;\n\n explicit IntervalLabelId(int32_t s) : start(s), end(s+1), strand('*'), lid(-1), eid(-1) {}\n IntervalLabelId(int32_t s, int32_t e, char t, int32_t l, int32_t i) : start(s), end(e), strand(t), lid(l), eid(i) {}\n };\n\n \n template<typename TRecord>\n struct SortIntervalLabel : public std::binary_function<TRecord, TRecord, bool> {\n 
inline bool operator()(TRecord const& s1, TRecord const& s2) const {\n return s1.lid < s2.lid;\n }\n };\n\n template<typename TRecord>\n struct SortIntervalStart : public std::binary_function<TRecord, TRecord, bool> {\n inline bool operator()(TRecord const& s1, TRecord const& s2) const {\n return s1.start < s2.start;\n }\n };\n\n\n inline void\n _insertInterval(std::vector<IntervalLabel>& cr, int32_t s, int32_t e, char strand, int32_t lid, int32_t) {\n // Uniqueness not necessary because we flatten the interval map\n cr.push_back(IntervalLabel(s, e, strand, lid));\n }\n\n inline void\n _insertInterval(std::vector<IntervalLabelId>& cr, int32_t s, int32_t e, char strand, int32_t lid, int32_t eid) {\n // Check uniqueness\n bool isUnique = true;\n for(uint32_t i = 0; i < cr.size(); ++i) {\n if ((cr[i].start == s) && (cr[i].end == e) && (cr[i].strand == strand) && (cr[i].lid == lid)) {\n\tisUnique = false;\n\tbreak;\n }\n }\n if (isUnique) cr.push_back(IntervalLabelId(s, e, strand, lid, eid));\n }\n\n inline bool\n _strandOkay(bam1_t* rec, char const strand, uint16_t const stranded) {\n if (stranded) {\n if (stranded == 1) {\n\tif (rec->core.flag & BAM_FREAD1) {\n\t if (rec->core.flag & BAM_FREVERSE) {\n\t if (strand != '-') return false;\n\t } else {\n\t if (strand != '+') return false;\n\t }\n\t} else {\n\t if (rec->core.flag & BAM_FREVERSE) {\n\t if (strand != '+') return false;\n\t } else {\n\t if (strand != '-') return false;\n\t }\n\t}\n } else {\n\tif (rec->core.flag & BAM_FREAD1) {\n\t if (rec->core.flag & BAM_FREVERSE) {\n\t if (strand != '+') return false;\n\t } else {\n\t if (strand != '-') return false;\n\t }\n\t} else {\n\t if (rec->core.flag & BAM_FREVERSE) {\n\t if (strand != '-') return false;\n\t } else {\n\t if (strand != '+') return false;\n\t }\n\t}\n }\n }\n return true;\n }\n\n inline std::size_t hash_read(bam1_t* rec) {\n std::size_t seed = hash_string(bam_get_qname(rec));\n boost::hash_combine(seed, rec->core.tid);\n boost::hash_combine(seed, rec->core.pos);\n boost::hash_combine(seed, (rec->core.flag & BAM_FREAD2));\n return seed;\n }\n \n inline std::size_t hash_pair(bam1_t* rec) {\n std::size_t seed = hash_string(bam_get_qname(rec));\n boost::hash_combine(seed, rec->core.tid);\n boost::hash_combine(seed, rec->core.pos);\n boost::hash_combine(seed, rec->core.mtid);\n boost::hash_combine(seed, rec->core.mpos);\n return seed;\n }\n\n template<typename TGenomicRegions>\n inline void\n getGeneLength(TGenomicRegions const& gRegions, std::vector<uint32_t>& geneLength) {\n for(uint32_t refIndex = 0; refIndex < gRegions.size(); ++refIndex) {\n for(uint32_t i = 0; i < gRegions[refIndex].size(); ++i) {\n\tgeneLength[gRegions[refIndex][i].lid] += gRegions[refIndex][i].end - gRegions[refIndex][i].start;\n }\n }\n }\n \n inline int32_t\n homopolymerContext(std::string const& s, int32_t idx, int32_t homlen) {\n for(int32_t i = std::max(0, idx - (homlen - 1)); i <= (idx + 1); ++i) {\n if (i + homlen <= (int32_t) s.size()) {\n\tbool hompoly = true;\n\tfor(int32_t k = i + 1; k < i + homlen; ++k) {\n\t if (s[k] != s[i]) {\n\t hompoly = false;\n\t break;\n\t }\n\t}\n\tif (hompoly) {\n\t if (s[i] == 'A') return 0;\n\t else if (s[i] == 'C') return 1;\n\t else if (s[i] == 'G') return 2;\n\t else if (s[i] == 'T') return 3;\n\t else if (s[i] == 'N') return 4;\n\t}\n }\n }\n return 5; // None\n }\n\n inline std::size_t hash_pair_mate(bam1_t* rec) {\n std::size_t seed = hash_string(bam_get_qname(rec));\n boost::hash_combine(seed, rec->core.mtid);\n boost::hash_combine(seed, rec->core.mpos);\n 
boost::hash_combine(seed, rec->core.tid);\n boost::hash_combine(seed, rec->core.pos);\n return seed;\n }\n\n inline bool is_gff3(boost::filesystem::path const& f) {\n std::ifstream in(f.string().c_str());\n if (!in) return false;\n in.close();\n\n std::ifstream file(f.string().c_str(), std::ios_base::in | std::ios_base::binary);\n boost::iostreams::filtering_streambuf<boost::iostreams::input> dataIn;\n dataIn.push(boost::iostreams::gzip_decompressor());\n dataIn.push(file);\n std::istream instream(&dataIn);\n std::string gline;\n std::getline(instream, gline);\n bool gff = false;\n if ((gline.size()>=5) && (gline.substr(0,5) == \"##gff\")) gff = true;\n file.close();\n return gff;\n }\n \n \n inline bool is_gz(boost::filesystem::path const& f) {\n std::ifstream bfile(f.string().c_str(), std::ios_base::binary | std::ios::ate);\n bfile.seekg(0, std::ios::beg);\n char byte1;\n bfile.read(&byte1, 1);\n char byte2;\n bfile.read(&byte2, 1);\n bfile.close();\n if ((byte1 == '\\x1F') && (byte2 == '\\x8B')) return true;\n else return false;\n }\n \n\n // F+ 0\n // F- 1\n // R+ 2\n // R- 3\n inline uint8_t layout(bam1_t const* rec) {\n if (rec->core.flag & BAM_FREAD1) {\n if (!(rec->core.flag & BAM_FREVERSE)) {\n\tif (!(rec->core.flag & BAM_FMREVERSE)) return (rec->core.pos < rec->core.mpos) ? 0 : 1;\n\telse return (rec->core.pos < rec->core.mpos) ? 2 : 3;\n } else {\n\tif (!(rec->core.flag & BAM_FMREVERSE)) return (rec->core.pos > rec->core.mpos) ? 2 : 3;\n\telse return (rec->core.pos > rec->core.mpos) ? 0 : 1;\n }\n } else {\n if (!(rec->core.flag & BAM_FREVERSE)) {\n\tif (!(rec->core.flag & BAM_FMREVERSE)) return (rec->core.pos < rec->core.mpos) ? 1 : 0;\n\telse return (rec->core.pos < rec->core.mpos) ? 2 : 3;\n } else {\n\tif (!(rec->core.flag & BAM_FMREVERSE)) return (rec->core.pos > rec->core.mpos) ? 2 : 3;\n\telse return (rec->core.pos > rec->core.mpos) ? 
1 : 0;\n }\n }\n }\n \n inline uint32_t alignmentLength(bam1_t const* rec) {\n uint32_t* cigar = bam_get_cigar(rec);\n uint32_t alen = 0;\n for (uint32_t i = 0; i < rec->core.n_cigar; ++i)\n if ((bam_cigar_op(cigar[i]) == BAM_CMATCH) || (bam_cigar_op(cigar[i]) == BAM_CEQUAL) || (bam_cigar_op(cigar[i]) == BAM_CDIFF) || (bam_cigar_op(cigar[i]) == BAM_CDEL) || (bam_cigar_op(cigar[i]) == BAM_CREF_SKIP)) alen += bam_cigar_oplen(cigar[i]);\n return alen;\n }\n\n inline uint32_t sequenceLength(bam1_t const* rec) {\n uint32_t* cigar = bam_get_cigar(rec);\n uint32_t slen = 0;\n for (uint32_t i = 0; i < rec->core.n_cigar; ++i)\n if ((bam_cigar_op(cigar[i]) == BAM_CMATCH) || (bam_cigar_op(cigar[i]) == BAM_CEQUAL) || (bam_cigar_op(cigar[i]) == BAM_CDIFF) || (bam_cigar_op(cigar[i]) == BAM_CINS) || (bam_cigar_op(cigar[i]) == BAM_CSOFT_CLIP)) slen += bam_cigar_oplen(cigar[i]);\n return slen;\n }\n\n inline uint32_t\n lastAlignedPosition(bam1_t const* rec) {\n return rec->core.pos + alignmentLength(rec);\n }\n\n inline uint32_t halfAlignmentLength(bam1_t* rec) {\n return (alignmentLength(rec) / 2);\n }\n\n inline void\n reverseComplement(std::string& sequence) \n {\n std::string rev = boost::to_upper_copy(std::string(sequence.rbegin(), sequence.rend()));\n std::size_t i = 0;\n for(std::string::iterator revIt = rev.begin(); revIt != rev.end(); ++revIt, ++i) {\n switch (*revIt) {\n case 'A': sequence[i]='T'; break;\n case 'C': sequence[i]='G'; break;\n case 'G': sequence[i]='C'; break;\n case 'T': sequence[i]='A'; break;\n case 'N': sequence[i]='N'; break;\n default: break;\n }\n }\n }\n\n inline bool\n getSMTag(std::string const& header, std::string const& fileName, std::string& sampleName) {\n std::set<std::string> smIdentifiers;\n std::string delimiters(\"\\n\");\n typedef std::vector<std::string> TStrParts;\n TStrParts lines;\n boost::split(lines, header, boost::is_any_of(delimiters));\n TStrParts::const_iterator itH = lines.begin();\n TStrParts::const_iterator itHEnd = lines.end();\n bool rgPresent = false;\n for(;itH!=itHEnd; ++itH) {\n if (itH->find(\"@RG\")==0) {\n\tstd::string delim(\"\\t \");\n\tTStrParts keyval;\n\tboost::split(keyval, *itH, boost::is_any_of(delim));\n\tTStrParts::const_iterator itKV = keyval.begin();\n\tTStrParts::const_iterator itKVEnd = keyval.end();\n\tfor(;itKV != itKVEnd; ++itKV) {\n\t size_t sp = itKV->find(\":\");\n\t if (sp != std::string::npos) {\n\t std::string field = itKV->substr(0, sp);\n\t if (field == \"SM\") {\n\t rgPresent = true;\n\t std::string rgSM = itKV->substr(sp+1);\n\t smIdentifiers.insert(rgSM);\n\t }\n\t }\n\t}\n }\n }\n if (!rgPresent) {\n sampleName = fileName;\n return true;\n } else if (smIdentifiers.size() == 1) {\n sampleName = *(smIdentifiers.begin());\n return true;\n } else {\n sampleName = \"\";\n return false;\n }\n }\n\n inline void\n getRGs(std::string const& header, std::set<std::string>& rgs) {\n // Get read groups\n std::string delimiters(\"\\n\");\n typedef std::vector<std::string> TStrParts;\n TStrParts lines;\n boost::split(lines, header, boost::is_any_of(delimiters));\n TStrParts::const_iterator itH = lines.begin();\n TStrParts::const_iterator itHEnd = lines.end();\n bool rgPresent = false;\n for(;itH!=itHEnd; ++itH) {\n if (itH->find(\"@RG\")==0) {\n\tstd::string delim(\"\\t \");\n\tTStrParts keyval;\n\tboost::split(keyval, *itH, boost::is_any_of(delim));\n\tTStrParts::const_iterator itKV = keyval.begin();\n\tTStrParts::const_iterator itKVEnd = keyval.end();\n\tfor(;itKV != itKVEnd; ++itKV) {\n\t size_t sp = itKV->find(\":\");\n\t 
if (sp != std::string::npos) {\n\t std::string field = itKV->substr(0, sp);\n\t if (field == \"ID\") {\n\t rgPresent = true;\n\t std::string rgID = itKV->substr(sp+1);\n\t rgs.insert(rgID);\n\t }\n\t }\n\t}\n }\n }\n if (!rgPresent) rgs.insert(\"DefaultLib\");\n }\n\n template<typename TConfig>\n inline int32_t\n countRGs(TConfig const& c) {\n samFile* samfile = sam_open(c.bamFile.string().c_str(), \"r\");\n hts_set_fai_filename(samfile, c.genome.string().c_str());\n bam_hdr_t* hdr = sam_hdr_read(samfile);\n std::set<std::string> rgs;\n getRGs(std::string(hdr->text), rgs);\n bam_hdr_destroy(hdr);\n sam_close(samfile);\n return rgs.size();\n }\n\n\n template<typename TVector>\n inline int32_t\n medianFromHistogram(TVector const& vec) {\n int64_t tc = 0;\n for(typename TVector::const_iterator it = vec.begin(); it != vec.end(); ++it) tc += *it;\n int64_t medind = tc / 2;\n tc = 0;\n for(int32_t i = 0; i < (int32_t) vec.size(); ++i) {\n tc += (int64_t) vec[i];\n if (tc >= medind) return i;\n }\n return 0;\n }\n\n template<typename TVector>\n inline double\n meanFromHistogram(TVector const& vec) {\n int64_t tc = 0;\n for(typename TVector::const_iterator it = vec.begin(); it != vec.end(); ++it) tc += *it;\n int64_t mean = 0;\n for(int32_t i = 0; i < (int32_t) vec.size(); ++i) mean += (int64_t) (vec[i]) * (int64_t) (i);\n return (double) mean / (double) tc;\n }\n\n template<typename TVector>\n inline double\n sdFromHistogram(TVector const& vec) {\n double mu = meanFromHistogram(vec);\n int64_t tc = 0;\n for(typename TVector::const_iterator it = vec.begin(); it != vec.end(); ++it) tc += *it;\n double sd = 0;\n for(int32_t i = 0; i < (int32_t) vec.size(); ++i) sd += (double) (vec[i]) * ((double) (i) - mu) * ((double) (i) - mu);\n return std::sqrt((double) sd / (double) tc);\n }\n\n inline bool\n loadSingleFasta(std::string const& filename, std::string& faname, std::string& seq, bool nonACGTN) {\n faname = \"\";\n std::string tmpfasta = \"\";\n std::ifstream fafile(filename.c_str());\n if (fafile.good()) {\n std::string line;\n while(std::getline(fafile, line)) {\n\tif (!line.empty()) {\n\t if (line[0] == '>') {\n\t if (!faname.empty()) {\n\t std::cerr << \"Only single-chromosome FASTA files are supported.\" << std::endl;\n\t return false;\n\t }\n\t if (line.at(line.length() - 1) == '\\r' ){\n\t faname = line.substr(1, line.length() - 2);\n\t } else {\n\t faname = line.substr(1);\n\t }\n\t } else {\n\t if (line.at(line.length() - 1) == '\\r' ){\n\t tmpfasta += boost::to_upper_copy(line.substr(0, line.length() - 1));\n\t } else {\n\t tmpfasta += boost::to_upper_copy(line);\n\t }\n\t }\n\t}\n }\n fafile.close();\n }\n // Check FASTA\n for(uint32_t k = 0; k < tmpfasta.size(); ++k) {\n if (nonACGTN) seq += tmpfasta[k];\n else {\n\tif ((tmpfasta[k] == 'A') || (tmpfasta[k] == 'C') || (tmpfasta[k] == 'G') || (tmpfasta[k] == 'T') || (tmpfasta[k] == 'N')) seq += tmpfasta[k];\n }\n }\n if (seq.size() != tmpfasta.size()) {\n std::cerr << \"FASTA file \" << filename << \" contains illegal nucleotides!\" << std::endl;\n return false;\n } else return true;\n }\n\n\n}\n\n#endif\n" }, { "alpha_fraction": 0.591224193572998, "alphanum_fraction": 0.5942742228507996, "avg_line_length": 24.578014373779297, "blob_id": "c3c737c907c96b87270d5355c56b4b298289ff8f", "content_id": "44ab439ba063d846c939ea53a70f17660e46c26a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 14430, "license_type": "permissive", "max_line_length": 97, "num_lines": 564, 
"path": "/client/src/static/js/alfred.js", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "import axios from 'axios'\nimport Choices from 'choices.js'\nimport * as FilePond from 'filepond'\nimport { saveAs } from 'file-saver/FileSaver'\nimport { countBy, uniq, zip } from 'lodash'\nimport csv from 'papaparse'\nimport pako from 'pako'\n\nimport examples from '../examples/examples.json'\n\n$('#mainTab a').on('click', function(e) {\n e.preventDefault()\n $(this).tab('show')\n})\n\n$(function() {\n $('[data-toggle=\"tooltip\"]').tooltip()\n})\n\nconst resultLink = document.getElementById('link-results')\n\nconst submitButton = document.getElementById('btn-submit')\nsubmitButton.addEventListener('click', function() {\n resultLink.click()\n run()\n})\n\nlet data, exampleData, readGroups, summary\n\nconst inputFile = document.getElementById('inputFile')\nconst chartsContainer = document.getElementById('charts-container')\nconst resultContainer = document.getElementById('result-container')\nconst resultInfo = document.getElementById('result-info')\nconst resultError = document.getElementById('result-error')\nconst selectSample = Select(document.getElementById('select-sample'))\nconst selectReadGroup = Select(document.getElementById('select-rg'))\nconst selectExamples = Select(document.getElementById('select-examples'))\nconst selectToc = Select(document.getElementById('select-toc'))\nconst summaryTab = document.getElementById('summary-tab')\n\nconst fileUpload = FilePond.create(inputFile)\n\nselectExamples.setChoices(\n examples.map(ex => ({\n value: ex.filename,\n label: ex.title\n })),\n 'value',\n 'label',\n true\n)\n\nfunction run() {\n const fileObjects = fileUpload.getFiles()\n\n if (fileObjects.length === 0) {\n showError('Error: no files specified')\n return\n }\n\n const fileCounts = countBy(fileObjects.map(f => f.filename))\n for (const [fileName, count] of Object.entries(fileCounts)) {\n if (count > 1) {\n showError(`Error: file <code>${fileName}</code> specified multiple times`)\n return\n }\n }\n\n hideElement(resultContainer)\n hideElement(resultError)\n showElement(resultInfo)\n summaryTab.innerHTML = ''\n\n mergeInputs(fileObjects).then(() => {\n if (data) {\n handleSuccess(data)\n }\n })\n}\n\nfunction Select(element, options = {}) {\n return new Choices(\n element,\n Object.assign(\n {\n shouldSort: false,\n searchResultLimit: 50,\n searchFields: ['label']\n },\n options\n )\n )\n}\n\nfunction readFile(file) {\n const fileReader = new FileReader()\n const isGzip = file.name.endsWith('.gz')\n\n if (isGzip) {\n fileReader.readAsArrayBuffer(file)\n } else {\n fileReader.readAsText(file)\n }\n\n return new Promise((resolve, reject) => {\n fileReader.onload = event => {\n let content = event.target.result\n if (isGzip) {\n content = pako.ungzip(content, { to: 'string' })\n }\n try {\n data = JSON.parse(content)\n // TODO better check\n if (!data.samples) {\n reject(`Error(${file.name}): wrong format, missing 'samples' key.`)\n }\n resolve(data)\n } catch (error) {\n reject(`Error(${file.name}): not a JSON file.`)\n }\n }\n })\n}\n\nfunction mergeInputs(fileObjects) {\n const fileReads = []\n for (const fileObject of fileObjects) {\n fileReads.push(readFile(fileObject.file))\n }\n return Promise.all(fileReads)\n .then(fileData => {\n data = {\n samples: fileData\n .map(d => d.samples)\n .reduce((acc, cur) => acc.concat(cur))\n }\n consolidateSummaries(data)\n })\n .catch(error => {\n data = undefined\n showError(error)\n })\n}\n\nfunction 
consolidateSummaries(data) {\n const allColumns = uniq(\n data.samples\n .map(s => s.summary.data.columns)\n .reduce((acc, cur) => acc.concat(cur))\n )\n for (const sample of data.samples) {\n const oldColumns = sample.summary.data.columns\n const oldRows = sample.summary.data.rows.map(\n row => new Map(zip(oldColumns, row))\n )\n sample.summary.data.columns = allColumns\n sample.summary.data.rows = []\n for (const row of oldRows) {\n sample.summary.data.rows.push(\n allColumns.map(col => (row.has(col) ? row.get(col) : null))\n )\n }\n }\n}\n\nfunction handleSuccess(data) {\n chartsContainer.innerHTML = ''\n\n const samples = uniq(data.samples.map(sample => sample.id))\n\n readGroups = {}\n data.samples.forEach(sample => {\n if (!(sample.id in readGroups)) {\n readGroups[sample.id] = new Set()\n }\n for (const rg of sample.readGroups) {\n if (readGroups[sample.id].has(rg.id)) {\n showError(`Error: read groups of sample '${sample.id}' are not unique.`)\n }\n readGroups[sample.id].add(rg.id)\n }\n })\n\n selectSample.setChoices(\n samples.map((s, i) => ({\n value: s,\n label: s,\n selected: i === 0\n })),\n 'value',\n 'label',\n true\n )\n\n selectReadGroup.setChoices(\n [...readGroups[samples[0]].values()].map((rg, i) => ({\n value: rg,\n label: rg,\n selected: i === 0\n })),\n 'value',\n 'label',\n true\n )\n\n summary = {\n title: data.samples[0].summary.title,\n data: {\n columns: data.samples[0].summary.data.columns,\n rows: data.samples\n .map(s => s.summary.data.rows)\n .reduce((acc, cur) => acc.concat(cur))\n }\n }\n\n summaryTable(summary, true)\n populateToc(samples[0], [...readGroups[samples[0]].values()][0])\n vis(data, samples[0], [...readGroups[samples[0]].values()][0])\n}\n\nwindow.handleExampleSelectChange = handleExampleSelectChange\nfunction handleExampleSelectChange() {\n const example = selectExamples.getValue(true)\n showExample(example)\n}\n\nwindow.handleReadGroupSelectChange = handleReadGroupSelectChange\nfunction handleReadGroupSelectChange() {\n const sample = selectSample.getValue(true)\n const readGroup = selectReadGroup.getValue(true)\n chartsContainer.innerHTML = ''\n populateToc(sample, readGroup)\n showElement(resultInfo)\n setTimeout(() => {\n vis(data, sample, readGroup)\n })\n}\n\nwindow.handleSampleSelectChange = handleSampleSelectChange\nfunction handleSampleSelectChange() {\n const sample = selectSample.getValue(true)\n const rgs = [...readGroups[sample].values()]\n selectReadGroup.setChoices(\n rgs.map((rg, i) => ({\n value: rg,\n label: rg,\n selected: i === 0\n })),\n 'value',\n 'label',\n true\n )\n const readGroup = rgs[0]\n chartsContainer.innerHTML = ''\n populateToc(sample, readGroup)\n showElement(resultInfo)\n setTimeout(() => {\n vis(data, sample, readGroup)\n })\n}\n\nfunction populateToc(sample, readGroup) {\n const dataRg = data.samples\n .filter(s => s.id === sample)\n .find(s => s.readGroups.find(rg => rg.id === readGroup))\n .readGroups.find(rg => rg.id === readGroup)\n\n selectToc.setChoices(\n dataRg.metrics.map((metric, i) => ({\n value: metric.id,\n label: metric.title,\n selected: i === 0\n })),\n 'value',\n 'label',\n true\n )\n}\n\nwindow.handleTocChange = handleTocChange\nfunction handleTocChange() {\n const targetId = selectToc.getValue(true)\n setTimeout(() => {\n document.getElementById(targetId).scrollIntoView()\n }, 0)\n}\n\nconst chartDispatch = {\n bar: chart,\n line: chart,\n table: table\n}\n\nfunction vis(data, sample, readGroup) {\n hideElement(resultInfo)\n hideElement(resultError)\n showElement(resultContainer)\n\n 
const dataRg = data.samples\n .filter(s => s.id === sample)\n .find(s => s.readGroups.find(rg => rg.id === readGroup))\n .readGroups.find(rg => rg.id === readGroup)\n\n for (const metric of dataRg.metrics) {\n chartDispatch[metric.type](metric, chartsContainer)\n }\n}\n\nfunction chart(metricData, parent) {\n const container = document.createElement('div')\n container.id = metricData.id\n parent.appendChild(container)\n\n const chartData = []\n for (let i = 0; i < metricData.y.data.length; i += 1) {\n const xData =\n metricData.x.data.length === metricData.y.data.length\n ? metricData.x.data[i].values\n : metricData.x.data[0].values\n const trace = {\n x: xData,\n y: metricData.y.data[i].values,\n name: metricData.y.data[i].title || ''\n }\n if (metricData.type === 'bar') {\n trace.type = 'bar'\n }\n chartData.push(trace)\n }\n\n let title = metricData.title\n if (metricData.subtitle) {\n title += `<br>${metricData.subtitle}`\n }\n\n const layout = {\n title,\n xaxis: {\n title: metricData.x.axis.title,\n zeroline: false\n },\n yaxis: {\n title: metricData.y.axis.title,\n zeroline: false\n }\n }\n\n if (metricData.type === 'bar' && metricData.y.data.length > 1) {\n layout.barmode = 'group' // default\n if (metricData.options && metricData.options.layout) {\n layout.barmode = metricData.options.layout\n }\n }\n\n if (metricData.x.axis.range) {\n layout.xaxis.range = metricData.x.axis.range\n // TODO is there really no built-in way for this?\n if (!metricData.y.axis.range) {\n let yAxisRange = [0, 0]\n for (let i = 0; i < metricData.y.data.length; i += 1) {\n const yData = metricData.y.data[i].values\n const xData =\n metricData.x.data.length === metricData.y.data.length\n ? metricData.x.data[i].values\n : metricData.x.data[0].values\n for (const [x, y] of zip(xData, yData)) {\n if (\n x < metricData.x.axis.range[0] ||\n x > metricData.x.axis.range[1]\n ) {\n continue\n }\n if (y < yAxisRange[0]) {\n yAxisRange[0] = y\n }\n if (y > yAxisRange[1]) {\n yAxisRange[1] = y\n }\n }\n }\n layout.yaxis.range = yAxisRange\n }\n }\n\n if (metricData.y.axis.range) {\n layout.yaxis.range = metricData.y.axis.range\n }\n\n Plotly.newPlot(container, chartData, layout)\n}\n\n// TODO consolidate / refactor table functions\n\nfunction table(tableData, parent) {\n const html = `\n <h4>${tableData.title}</h4>\n <div style=\"overflow-x: auto;\">\n <table class=\"table table-sm table-striped table-hover\">\n <thead>\n <tr>\n ${tableData.data.columns\n .map(title => `<th scope=\"col\">${title}</th>`)\n .join('')}\n </tr>\n </thead>\n <tbody>\n ${tableData.data.rows\n .map(\n row => `<tr>\n ${row\n .map(\n (value, i) =>\n `<td title=\"${tableData.data.columns[i]}\">${value}</td>`\n )\n .join('')}\n </tr>`\n )\n .join('')}\n </tbody>\n </table>\n </div>\n `\n const element = document.createElement('div')\n element.id = tableData.id\n element.innerHTML = html\n parent.appendChild(element)\n}\n\nfunction summaryTable(tableData, transpose = false) {\n let html = `\n <h4>${tableData.title}</h4>\n <div class=\"mb-2\">\n <button type=\"button\" class=\"btn btn-outline-primary\" onclick=\"transpose()\">\n <i class=\"fas fa-redo-alt\" style=\"margin-right: 5px;\"></i>\n Transpose table\n </button>\n <button type=\"button\" class=\"btn btn-outline-primary\" onclick=\"summaryDownload()\">\n <i class=\"fas fa-file-download\" style=\"margin-right: 5px;\"></i>\n Download .csv\n </button>\n </div>\n <div style=\"overflow-x: auto;\">\n `\n if (transpose) {\n const rows = zip(tableData.data.columns, ...tableData.data.rows)\n html 
+= `\n <table id=\"summary-table\" class=\"table table-sm table-striped table-hover\" data-transposed>\n <tbody>\n ${rows\n .map(\n row => `<tr>\n ${row\n .map((value, i) => {\n if (i === 0) {\n return `<th scope=\"row\">${value}</th>`\n }\n return `<td title=\"${row[0]}\">${\n value === null ? '—' : value\n }</td>`\n })\n .join('')}\n </tr>`\n )\n .join('')}\n </tbody>\n </table>\n `\n } else {\n html += `\n <table id=\"summary-table\" class=\"table table-sm table-striped table-hover\">\n <thead>\n <tr>\n ${tableData.data.columns\n .map(title => `<th scope=\"col\">${title}</th>`)\n .join('')}\n </tr>\n </thead>\n <tbody>\n ${tableData.data.rows\n .map(\n row => `<tr>\n ${row\n .map(\n (value, i) =>\n `<td title=\"${tableData.data.columns[i]}\">${\n value === null ? '—' : value\n }</td>`\n )\n .join('')}\n </tr>`\n )\n .join('')}\n </tbody>\n </table>\n `\n }\n html += '</div>'\n summaryTab.innerHTML = html\n}\n\nwindow.transpose = transpose\nfunction transpose() {\n const tableElement = document.querySelector('#summary-table')\n const isTransposed = 'transposed' in tableElement.dataset\n summaryTable(summary, !isTransposed)\n}\n\nwindow.summaryDownload = summaryDownload\nfunction summaryDownload() {\n const tableElement = document.querySelector('#summary-table')\n const isTransposed = 'transposed' in tableElement.dataset\n let data\n if (isTransposed) {\n data = zip(summary.data.columns, ...summary.data.rows)\n } else {\n data = [summary.data.columns, ...summary.data.rows]\n }\n const csvData = csv.unparse(data)\n const blob = new Blob([csvData], { type: 'text/plain;charset=utf-8' })\n // TODO generate file name from input\n saveAs(blob, 'alfred-summary-stats.csv')\n}\n\nfunction showExample(filename) {\n hideElement(resultContainer)\n hideElement(resultError)\n chartsContainer.innerHTML = ''\n summaryTab.innerHTML = ''\n showElement(resultInfo)\n resultLink.click()\n\n const url = `./examples/${filename}`\n axios\n .get(url, {\n responseType: 'arraybuffer'\n })\n .then(response => {\n const content = pako.ungzip(response.data, { to: 'string' })\n exampleData = JSON.parse(content)\n data = exampleData\n handleSuccess(data)\n })\n .catch(error => {\n // FIXME proper error handling\n console.error(error)\n })\n}\n\nfunction showError(message) {\n hideElement(resultContainer)\n hideElement(resultInfo)\n summaryTab.innerHTML = ''\n\n showElement(resultError)\n document.querySelector('#error-message').innerHTML = message\n}\n\nfunction showElement(element) {\n element.classList.remove('d-none')\n}\n\nfunction hideElement(element) {\n element.classList.add('d-none')\n}\n" }, { "alpha_fraction": 0.6192643046379089, "alphanum_fraction": 0.6276071071624756, "avg_line_length": 35.88111877441406, "blob_id": "a9bfbfb12a8110e319f49452a474384058ef184c", "content_id": "06be97726b001a2598c5567d50dbaed177efebd3", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5274, "license_type": "permissive", "max_line_length": 162, "num_lines": 143, "path": "/src/repliseq.h", "repo_name": "tobiasrausch/bamStats", "src_encoding": "UTF-8", "text": "#ifndef REPLISEQ_H\n#define REPLISEQ_H\n\n#include <iostream>\n#include <vector>\n#include <fstream>\n\n#include <boost/program_options/cmdline.hpp>\n#include <boost/program_options/options_description.hpp>\n#include <boost/program_options/parsers.hpp>\n#include <boost/program_options/variables_map.hpp>\n#include <boost/date_time/posix_time/posix_time.hpp>\n#include 
<boost/date_time/gregorian/gregorian.hpp>\n#include <boost/tokenizer.hpp>\n#include <boost/filesystem.hpp>\n#include <boost/progress.hpp>\n\n#include <htslib/sam.h>\n#include <htslib/faidx.h>\n\n#ifdef PROFILE\n#include \"gperftools/profiler.h\"\n#endif\n\n#include \"repliseqproc.h\"\n\nnamespace bamstats {\n\n struct RepliSeqConfig {\n uint16_t minq;\n int32_t wsize;\n int32_t step;\n std::string outprefix;\n boost::filesystem::path genome;\n std::vector<boost::filesystem::path> files;\n };\n\n\n int repliseq(int argc, char **argv) {\n \n#ifdef PROFILE\n ProfilerStart(\"repliseq.prof\");\n#endif\n\n RepliSeqConfig c;\n\n // Parameter\n boost::program_options::options_description generic(\"Generic options\");\n generic.add_options()\n (\"help,?\", \"show help message\")\n (\"qual,q\", boost::program_options::value<uint16_t>(&c.minq)->default_value(1), \"min. mapping quality\")\n (\"window,w\", boost::program_options::value<int32_t>(&c.wsize)->default_value(50000), \"sliding window size\")\n (\"step,s\", boost::program_options::value<int32_t>(&c.step)->default_value(1000), \"window offset (step size)\")\n (\"reference,r\", boost::program_options::value<boost::filesystem::path>(&c.genome), \"reference fasta file (required)\")\n (\"outprefix,o\", boost::program_options::value<std::string>(&c.outprefix)->default_value(\"pref\"), \"output file prefix\")\n ;\n \n boost::program_options::options_description hidden(\"Hidden options\");\n hidden.add_options()\n (\"input-file\", boost::program_options::value<std::vector<boost::filesystem::path> >(&c.files), \"input bam files\")\n ;\n \n boost::program_options::positional_options_description pos_args;\n pos_args.add(\"input-file\", -1);\n \n boost::program_options::options_description cmdline_options;\n cmdline_options.add(generic).add(hidden);\n boost::program_options::options_description visible_options;\n visible_options.add(generic);\n boost::program_options::variables_map vm;\n boost::program_options::store(boost::program_options::command_line_parser(argc, argv).options(cmdline_options).positional(pos_args).run(), vm);\n boost::program_options::notify(vm);\n \n // Check command line arguments\n if ((vm.count(\"help\")) || (!vm.count(\"input-file\")) || (!vm.count(\"reference\"))) {\n std::cout << \"Usage: alfred \" << argv[0] << \" [OPTIONS] -r <ref.fa> -o outprefix <g1b.bam> <s1.bam> <s2.bam> <s3.bam> <s4.bam> <g2.bam>\" << std::endl;\n std::cout << visible_options << \"\\n\";\n return 1;\n } \n \n // Check genome\n if (!(boost::filesystem::exists(c.genome) && boost::filesystem::is_regular_file(c.genome) && boost::filesystem::file_size(c.genome))) {\n std::cerr << \"Input reference file is missing: \" << c.genome.string() << std::endl;\n return 1;\n } else {\n faidx_t* fai = fai_load(c.genome.string().c_str());\n if (fai == NULL) {\n\tif (fai_build(c.genome.string().c_str()) == -1) {\n\t std::cerr << \"Fail to open genome fai index for \" << c.genome.string() << std::endl;\n\t return 1;\n\t} else fai = fai_load(c.genome.string().c_str());\n }\n fai_destroy(fai);\n }\n \n // Check bam files\n for(unsigned int file_c = 0; file_c < c.files.size(); ++file_c) {\n if (!(boost::filesystem::exists(c.files[file_c]) && boost::filesystem::is_regular_file(c.files[file_c]) && boost::filesystem::file_size(c.files[file_c]))) {\n\tstd::cerr << \"Alignment file is missing: \" << c.files[file_c].string() << std::endl;\n\treturn 1;\n }\n samFile* samfile = sam_open(c.files[file_c].string().c_str(), \"r\");\n if (samfile == NULL) {\n\tstd::cerr << \"Fail to open 
file \" << c.files[file_c].string() << std::endl;\n\treturn 1;\n }\n hts_idx_t* idx = sam_index_load(samfile, c.files[file_c].string().c_str());\n if (idx == NULL) {\n\tstd::cerr << \"Fail to open index for \" << c.files[file_c].string() << std::endl;\n\treturn 1;\n }\n bam_hdr_t* hdr = sam_hdr_read(samfile);\n if (hdr == NULL) {\n\tstd::cerr << \"Fail to open header for \" << c.files[file_c].string() << std::endl;\n\treturn 1;\n }\n faidx_t* fai = fai_load(c.genome.string().c_str());\n for(int32_t refIndex=0; refIndex < hdr->n_targets; ++refIndex) {\n\tstd::string tname(hdr->target_name[refIndex]);\n\tif (!faidx_has_seq(fai, tname.c_str())) {\n\t std::cerr << \"BAM file chromosome \" << hdr->target_name[refIndex] << \" is NOT present in your reference file \" << c.genome.string() << std::endl;\n\t return 1;\n\t}\n }\n fai_destroy(fai);\n bam_hdr_destroy(hdr);\n hts_idx_destroy(idx);\n sam_close(samfile);\n }\n \n // Show cmd\n boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();\n std::cout << '[' << boost::posix_time::to_simple_string(now) << \"] \";\n std::cout << \"alfred \";\n for(int i=0; i<argc; ++i) { std::cout << argv[i] << ' '; }\n std::cout << std::endl;\n \n return repliseqRun(c);\n }\n\n}\n\n#endif\n" } ]
41
tsidv/kpi
https://github.com/tsidv/kpi
499d7df320f8624462db6adb1f32187209fbe7c7
349a95183335a7cc553d8ee5d66e5be7fea46304
9419d4c070f05e07a057631cb6a61f6ce4a8e19d
refs/heads/master
2016-09-17T01:50:18.918936
2016-09-08T11:57:36
2016-09-08T11:57:36
65,447,962
4
0
null
2016-08-11T07:23:45
2016-09-07T15:08:38
2016-09-26T14:35:01
HTML
[ { "alpha_fraction": 0.5795148015022278, "alphanum_fraction": 0.5902965068817139, "avg_line_length": 32.727272033691406, "blob_id": "c30ec1c7aeab77d1688fb957caf7c200cb5f6fb2", "content_id": "243c1655661379ec3d7b9ee679f4622d0693ee03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 742, "license_type": "no_license", "max_line_length": 77, "num_lines": 22, "path": "/data/prnAndNumOfObs.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass PrnAndNumOfObs:\n \"\"\"\n Author: Bhaskar Shankar (IDV-AG)\n Date : 05 Sept 2016\n\n Assigned to Class variable of ObservationsDataHeader\n VAR - PRN_AND_NUM_OF_OBS\n Parameters:\n SATELLITE_NUMBERS - Satellite numbers\n NUM_OF_OBS_PER_OBS_TYPE - number of observations for each observation\n type indicated in the \"SYS / # / OBS TYPES\"\n record\n If more than 9 observation types: Use continuation line(s)\n \"\"\"\n\n def __init__(self, satelliteNumbers=\"\", numOfObsPerObsType=\"\"):\n self.SATELLITE_NUMBERS = satelliteNumbers\n self.NUM_OF_OBS_PER_OBS_TYPE = numOfObsPerObsType\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6115384697914124, "avg_line_length": 31.5, "blob_id": "34328da2c402e6e8ce68b196214b6ebddb67a8d4", "content_id": "ffe7d5a35e584be5d1be12d4971894b0d4b8ff9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 780, "license_type": "no_license", "max_line_length": 79, "num_lines": 24, "path": "/data/observationRecords.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass Observationrecords:\n \"\"\"\n Author: Bhaskar Shankar (IDV-AG)\n Date : 05 Sept 2016\n\n MODULE: 5.6 Observation files decoder module\n\n Class variable for ObservationDecoderModel for Variable OBSERVATION_RECORDS\n\n Parameters:\n TIME_TAG_CAL - Each record has an associate time tag\n in calendar format\n OBSERVATION_PRNID_AND_RECORDS - PRN ID and corresponding observation\n data records\n \"\"\"\n\n def __init__(self, timeTagCal=\"\", observationPrnIdAndRecords=\"\"):\n self.TIME_TAG_CAL = timeTagCal\n # Class Ref: ObservationPnrAndRecords\n self.OBSERVATION_PRNID_AND_RECORDS = observationPrnIdAndRecords\n" }, { "alpha_fraction": 0.611341655254364, "alphanum_fraction": 0.6237897872924805, "avg_line_length": 30.434782028198242, "blob_id": "17f8d7ed667617cde79578769cf5f22732044f95", "content_id": "f709ca7b06e871f7bd9e1a819106da8452f7b4d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 723, "license_type": "no_license", "max_line_length": 79, "num_lines": 23, "path": "/data/observationPrnIdAndRecords.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass ObservationPrnIdAndRecords:\n \"\"\"\n Author: Bhaskar Shankar (IDV-AG)\n Date : 05 Sept 2016\n\n MODULE: 5.6 Observation files decoder module\n\n Class variable for Observationrecords for Variable PNR_AND_RECORDS\n\n Parameters:\n PRN_ID\t\t\t - Satellites id to be included in the computation\n OBSERVATION_RECORDS - Contains daily observations of Galileo satellites\n for the listed stations in Observables_list\n per PRNID per timetagcal of record.\n \"\"\"\n\n def __init__(self, prnId=\"\", observationRecords=\"\"):\n self.PRN_ID = prnId\n self.OBSERVATION_RECORDS = observationRecords\n" }, { "alpha_fraction": 
0.5641729831695557, "alphanum_fraction": 0.6382978558540344, "avg_line_length": 32.1136360168457, "blob_id": "3fed79d5d890353d9c4c50fd082c8c488adef6f7", "content_id": "825377495029167c301ed2b225f56adee0378493", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1457, "license_type": "no_license", "max_line_length": 70, "num_lines": 44, "path": "/test/userGridGeneratorTest.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport unittest\nfrom kpi.usergridgenerator.userGridGenerator import UserGridGenerator\n\n\nclass testUserGridGenerator(unittest.TestCase):\n \"\"\"\n Test cases for UserGridGenerator class\n \"\"\"\n\n def setUp(self):\n \"\"\"\n setup call, runs before each test\n \"\"\"\n self.classToTest = UserGridGenerator()\n\n def testUserPositionCount(self):\n userPositions = self.classToTest.generateUserGrid()\n # -85 to 85 - for each 5 degree\n latitudeCount = 175 / 5\n # -175 to 180 - for each 5 degree\n longitudeCount = 360 / 5\n assert len(userPositions) == latitudeCount * longitudeCount\n\n def testLatitudeAndLongitudeValue(self):\n \"\"\"\n Latitude loop should start from -85 to +85\n Longitude loop should start from -175 to +180\n \"\"\"\n userPositions = self.classToTest.generateUserGrid()\n assert userPositions[0][1] == -85\n assert userPositions[0][0] == -175\n assert userPositions[2519][1] == 85\n assert userPositions[2519][0] == 180\n\n def testCoordinatesValue(self):\n coordinatesValue = self.classToTest.getECEFPositions(35, -120)\n assert str(coordinatesValue[0]) == str(\"-2615213.42004073\")\n assert str(coordinatesValue[1]) == str(\"-4529682.51614651\")\n assert str(coordinatesValue[2]) == str(\"3637866.90980039\")\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.42527011036872864, "alphanum_fraction": 0.5354141592979431, "avg_line_length": 40.650001525878906, "blob_id": "3eaf10ff4618e2c0e2ad9b572e2f8f42d18d02b5", "content_id": "ceb40ae8ef63bc9f3bba2a9bbc7e44b2836d9bb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3332, "license_type": "no_license", "max_line_length": 79, "num_lines": 80, "path": "/test/configFileParserTest.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- cleapSecondsoding: utf-8 -*-\n\nimport unittest\nfrom nose.tools import *\nimport sys\nfrom kpi.configParser.configFileParser import ConfigFileParser\nfrom kpi.helper.common import Common\n\n\nclass TestConfigFileParser(unittest.TestCase):\n\n def setUp(self):\n \"\"\"\n setup call, runs before each test\n \"\"\"\n self.configFile = \"resources/Configuration.cfg\"\n self.classToTest = ConfigFileParser()\n self.startEpoch = \"2016 05 01 0 0 0\"\n self.endEpoch = \"2016 05 02 0 0 0\"\n self.step = \"300\"\n self.galBrc = \"brdm\"\n self.galRef = \"tum\"\n self.dcbRef = \"dcb_file.bsx\"\n self.stationList = [\"kour\", \"reyk\", \"ob4\"]\n self.leapSeconds = [('1980 1 1 0 0 0', '0'), ('1981 7 1 0 0 0', '1'),\n ('1982 7 1 0 0 0', '2'), ('1983 7 1 0 0 0', '3'),\n ('1985 7 1 0 0 0', '4'), ('1988 1 1 0 0 0', '5'),\n ('1990 1 1 0 0 0', '6'), ('1991 1 1 0 0 0', '7'),\n ('1992 7 1 0 0 0', '8'), ('1993 7 1 0 0 0', '9'),\n ('1994 7 1 0 0 0', '10'), ('1996 1 1 0 0 0', '11'),\n ('1997 7 1 0 0 0', '12'), ('1999 1 1 0 0 0', '13'),\n ('2006 1 1 0 0 0', '14'), ('2009 1 1 0 0 0', '15'),\n ('2012 7 1 0 0 0', '16'), ('2015 7 1 0 0 0', '17')]\n self.timeTagCal = [\"1981 6 
30 0 0 0\", \"1981 7 1 0 0 0\",\n \"1989 12 31 0 0 0\", \"1990 1 1 0 0 0\",\n \"2011 05 01 0 5 0\", \"2016 05 01 0 10 0\",\n \"2022 05 01 0 10 0\"]\n\n def testVariableReaderSuccess(self):\n confModel = self.classToTest.variablesReader(self.configFile)\n assert confModel is not None\n\n @raises(Exception)\n def testTimeTagFileNameGeneratorNonCoherentSteps(self):\n self.classToTest.timeTagFileNamesGenerator(\n self.startEpoch, \"2016 05 02 00 00 01\", self.step, self.galBrc,\n self.galRef, self.dcbRef, self.stationList)\n\n def testConfigFileParserSuccess(self):\n timeTagCal, timeTagGps, bceList, sp3List, dcbList, observableList, \\\n LS = self.classToTest.configFileParser(self.configFile)\n assert timeTagCal[0] == self.startEpoch\n assert timeTagGps[0].GPS_WEEK == 1895\n assert timeTagGps[0].SECONDS_OF_WEEK == 0.0\n assert timeTagCal[len(timeTagCal) - 1] == \"2016 05 01 23 55 00\"\n assert timeTagGps[len(timeTagGps) - 1].GPS_WEEK == 1895\n assert timeTagGps[len(timeTagGps) - 1].SECONDS_OF_WEEK == 86100.0\n assert len(timeTagCal) == 289\n assert len(timeTagGps) == 289\n assert len(LS) == 289\n assert \"17\" in LS\n assert [\n \"/resources/References/input_files/kour1220.16o\"] in observableList\n\n def testTimeTagFileNameGeneratorSuccess(self):\n \"\"\"\n must be tested within testConfigFileParser Process.\n otherwise problems with logfile\n \"\"\"\n pass\n\n def testLeapSecondsGenerator(self):\n ls = self.classToTest.leapSecondsGenerator(self.timeTagCal,\n self.leapSeconds)\n assert ls == ['0', '1', '5', '6', '15', '17', '17']\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.6200130581855774, "alphanum_fraction": 0.6612164974212646, "avg_line_length": 32.977779388427734, "blob_id": "38446821879315f46e60acb66e550c1248effb7a", "content_id": "c183f02bc7df1125c91c1c867f4b16bc6cb84eb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1529, "license_type": "no_license", "max_line_length": 87, "num_lines": 45, "path": "/test/sisreTest.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author: Thomas Schneider\n# Date : 18.08.2016\n\nimport unittest\nfrom nose import *\nfrom kpi.sisre.svattitude import SVAttitude\nfrom kpi.sisre.sisremain import SISREMain\nfrom kpi.refephermisclockdecoder.refEphermisClockDecoder import RefEphermisClockDecoder\nfrom kpi.test.refEphermisClockDecoderTest import TestRefEphermisClockDecoder\nfrom kpi.configParser.configFileParser import ConfigFileParser\n\nclass SISRETest(unittest.TestCase):\n \"\"\"\n tests sv-attitude model for satellites read from an sp3c file\n \"\"\"\n\n def setUp(self):\n pass\n\n def test_ComputeRotationAndSpeedMatrices(self):\n \"\"\"tests against simple data \"\"\"\n # Common.initLogger('svattitude.log')\n start = [2016, 8, 8, 12, 0, 0]\n end = [2016, 8, 9, 12, 0, 0]\n gals = [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]]\n epoch_hour = 360\n # simple test for one hour(duration ~ 300sec)\n SISREMain().calculateSVAttitudes(start, end, gals, epoch_hour)\n\n def test_ComputeRotationAndSpeedMatrices(self):\n \"\"\" tests against an sp3c file \"\"\"\n dat = TestRefEphermisClockDecoder()\n dat.setUp()\n svRefPVTCoM, dateTags = RefEphermisClockDecoder(\n ).parseSP3CFiles(dat.sp3List, dat.prnId)\n # print(\"svRevPVTCoM:\\n\", svRefPVTCoM, \"\\ndateTags:\\n\", dateTags)\n SISREMain().calculateSatellites(svRefPVTCoM, dateTags)\n\n def tearDown(self):\n \"\"\"pass\"\"\"\n\nif __name__ == 
'__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5569482445716858, "alphanum_fraction": 0.5760217905044556, "avg_line_length": 46.0512809753418, "blob_id": "0f4c0097a55bf9d1f61759e2527f06f55d101228", "content_id": "953417a23e3a3e9107d5b4bbeb8c3232cb1f9863", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1836, "license_type": "no_license", "max_line_length": 79, "num_lines": 39, "path": "/data/leapSeconds.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass LeapSeconds:\n \"\"\"\n Author: Bhaskar Shankar (IDV-AG)\n Date : 05 Sept 2016\n\n Assigned to Class variable of ObservationsDataHeader\n VAR - LEAP_SECONDS\n\n Parameters:\n CURRENT_NUM_OF_LEAP_SECONDS - Current Number of leap seconds\n FUTURE_OR_PAST_LEAP_SECONDS - Future or past leap seconds ΔtLSF(BNK),\n i.e. future leap second if the week\n and day number are in the future\n WEEK_NUMBER - Respective week number WN_LSF (continuous number) (BNK).\n For GPS, GAL, QZS and IRN, weeks since 6-Jan-1980.\n When BDS only file leap seconds specified,\n weeks since 1-Jan-2006.\n DAY_NUMBER - Respective day number DN (0-6) BeiDou and (1-7) for GPS\n and others constellations,(BNK). The day number is the\n GPS or BeiDou day before the leap second\n (See Note 1 below).\n In the case of the Tuesday, June 30/2015 (GPS\n Week 1851, DN 3) the UTC leap second actually occurred 16\n seconds into the next GPS day.\n TIME_SYSTEM_IDENTIFIER - Time system identifier: only GPS and BDS are\n valid identifiers. Blank defaults to GPS.\n \"\"\"\n\n def __init__(self, currentNumOfLeapSeconds=\"\", futureOrPastLeapSeconds=\"\",\n weekNumber=\"\", dayNumber=\"\", timeSystemIdentifier=\"\"):\n self.CURRENT_NUM_OF_LEAP_SECONDS = currentNumOfLeapSeconds\n self.FUTURE_OR_PAST_LEAP_SECONDS = futureOrPastLeapSeconds\n self.WEEK_NUMBER = weekNumber\n self.DAY_NUMBER = dayNumber\n self.TIME_SYSTEM_IDENTIFIER = timeSystemIdentifier\n" }, { "alpha_fraction": 0.5070977807044983, "alphanum_fraction": 0.5591482520103455, "avg_line_length": 30.700000762939453, "blob_id": "2f08346c215fdec98c52800924fc94b70064814e", "content_id": "f4fbc5c383e0f56903da356e85eebba060e0aa59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1268, "license_type": "no_license", "max_line_length": 145, "num_lines": 40, "path": "/data/svRefPVTCoM.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass SvRefPVTCoM:\n \"\"\"\n SVRefPVTCoM object. inherit all the variables coming from sp3c file\n Parameter:\n PRN_ID string: satellite id\n X_com string: X_COM variable\n Y_com string: Y_COM variable\n Z_com string: Z_COM variable\n Vx_com string: vX_COM variable\n Vy_com string: vY_COM variable\n Vz_com string: vZ_COM variable\n Clk_apc_ref string: clkApcRef variable\n use:\n 1. use as constructor:\n model = SvRefPVTCoM(\"1234.12\", \"3456.22\", \"7891.12\",\n \"-1234.11\", \"313.212\", \"9841.65\",\n \"54165.65\")\n 2. 
give the variables separately:\n model = SvRefPVTCoM\n model.X_com = \"1234.12\"\n model.Y_com = \"8732.11\"\n model.Clk_apc_ref = \"33.44111\"\n \"\"\"\n\n def __init__(self, prnId, xCom, yCom, zCom, vXCom, vYCom, vZCom, clkApcRef):\n self.PRN_ID = prnId\n self.X_com = xCom\n self.Y_com = yCom\n self.Z_com = zCom\n self.Vx_com = vXCom\n self.Vy_com = vYCom\n self.Vz_com = vZCom\n self.Clk_apc_ref = clkApcRef\n\n def __str__(self):\n return \"POS(\" + self.X_com + \", \" + self.Y_com + \", \" + self.Z_com + \") V(\" + self.vX_COM + \", \" + self.Vy_com + \", \" + self.Vz_com + \")\"\n" }, { "alpha_fraction": 0.43396225571632385, "alphanum_fraction": 0.5324947834014893, "avg_line_length": 14.387096405029297, "blob_id": "2cc7e0216c81efc3f25d98d2bb266ba7292d9366", "content_id": "cd5665d470fe5be8441a99ed46170d04eb1920fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 477, "license_type": "no_license", "max_line_length": 54, "num_lines": 31, "path": "/helper/kpiConstants.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass KpiConstants:\n \"\"\"\n Contains all the constants for the kpi project\n \"\"\"\n\n fE1 = 1575.42\n \"\"\"\n Galileo Frequency\n \"\"\"\n fE5a = 1176.45\n \"\"\"\n Galileo Frequency\n \"\"\"\n fE5b = 1207.14\n \"\"\"\n Galileo Frequency\n \"\"\"\n\n ggC = 3.986004418e+14\n \"\"\"\n geocentric gravitational constant\n \"\"\"\n\n OMEGAe = 7.2921151467e-05\n \"\"\"\n mean angular velofity of the earth\n \"\"\"\n" }, { "alpha_fraction": 0.5711159706115723, "alphanum_fraction": 0.5733041763305664, "avg_line_length": 23.052631378173828, "blob_id": "ee78251a5ccae9a6f1de3b270221670e8c2efd6d", "content_id": "eea4ae07d04a4902ceff64f3a427cf68dc628ac5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 457, "license_type": "no_license", "max_line_length": 49, "num_lines": 19, "path": "/data/timeTagGpsModel.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass TimeTagGpsModel:\n \"\"\"\n Represent the GPS Time Tag\n Parameter:\n GPS_WEEK int: the gps week\n SECONDS_OF_WEEK int: the seconds of this week\n \"\"\"\n\n def __init__(self, gpsWeek, secodsOfWeek):\n self.GPS_WEEK = gpsWeek\n self.SECONDS_OF_WEEK = secodsOfWeek\n\n def printMe(self):\n print(\"(\" + int(self.GPS_WEEK) + \", \" +\n int(self.SECONDS_OF_WEEK) + \")\")\n" }, { "alpha_fraction": 0.6005547642707825, "alphanum_fraction": 0.6019417643547058, "avg_line_length": 31.772727966308594, "blob_id": "dca956fbf502a3d985acdcd9dbcddce5cc387aa1", "content_id": "8357bc72629139fae6debf4800e01cda64345df7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 721, "license_type": "no_license", "max_line_length": 68, "num_lines": 22, "path": "/data/antennaPhaseCenter.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass AntennaPhaseCenter:\n \"\"\"\n Average phase center position w/r to antenna reference point (m)\n\n Satellite system (G/R/E/J/C/I/S)\n Observation code\n North/East/Up (fixed station) or\n X/Y/Z in body-fixed system (vehicle)\n \"\"\"\n\n def __init__(self, satelliteSystem=\"\", observationCode=\"\",\n northEastUp=\"\", bodyFixedPositionXYZ=\"\"):\n self.SATELLITE_SYSTEM = satelliteSystem\n self.OBSERVATION_CODE = observationCode\n # 
AntennaNorthEastUp\n self.NORTH_EAST_UP = northEastUp\n # PositionVectorXYZ\n self.BODY_FIXED_POSITION_XYZ = bodyFixedPositionXYZ\n" }, { "alpha_fraction": 0.6297520399093628, "alphanum_fraction": 0.6314049363136292, "avg_line_length": 29.25, "blob_id": "8bd3c6a1ec2c42a32b616b7d6767f784893d6ccc", "content_id": "d3237acd09663742233928c48c97fda13eb4ea12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 605, "license_type": "no_license", "max_line_length": 63, "num_lines": 20, "path": "/data/antennaOffsetDecoderModel.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass AntennaOffsetDecoderModel:\n \"\"\"\n The output list Data from AntennaOffsetParser Class\n Contains offset data for navigation and precise reference\n orbit data per epoch per prn\n Parameters:\n PRNID = Satellite PRN\n EPOCH = A EPOCH from the START and END epoch\n OFFSETDATA = Offset data with AntennaNorthEastUp object\n for north ,east and up\n \"\"\"\n\n def __init__(self, prnId=\"\", epoch=\"\", offsetData=\"\"):\n self.PRNID = prnId\n self.EPOCH = epoch\n self.OFFSETDATA = offsetData\n" }, { "alpha_fraction": 0.7972972989082336, "alphanum_fraction": 0.7972972989082336, "avg_line_length": 17.5, "blob_id": "2a394c9b03688931a0abe5227036a3c66664d044", "content_id": "6823dee89c1efca7e1e104b0c9a0ac6c36c91c37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 74, "license_type": "no_license", "max_line_length": 45, "num_lines": 4, "path": "/README.md", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "# kpi \nKPI Monitoring Tool\n\nBeispiel-Berechnung eines SV-Attitude Models.\n" }, { "alpha_fraction": 0.6385964751243591, "alphanum_fraction": 0.6432748436927795, "avg_line_length": 36.173912048339844, "blob_id": "23f94c659e6eb5ea6fd7e1079dd1c9e63baa8d89", "content_id": "f430687758cf1c54bbeecf573e325d6d81c16c50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 855, "license_type": "no_license", "max_line_length": 72, "num_lines": 23, "path": "/data/antennaPhaseOffset.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass AntennaPhaseOffset:\n \"\"\"\n Class defined in reference with 'antexFileDecoder.m' and antex14.txt\n PVC_TYPE = Phase center variation type\n SERIAL_NUMBER = Receiver antenna serial number\n NUMBER_OF_FREQEUNCIES = Number of frequencies, for which phase\n patterns are stored for the current antenna\n VALID_FROM = Start of validity period in GPS time\n VALID_UNTIL = End of validity period in GPS time\n \"\"\"\n\n def __init__(self, pvcType=\"\", serialNum=\"\", numOfFreq=0,\n validFrom=\"\", validUntil=\"\", freqList=\"\"):\n self.PVC_TYPE = pvcType\n self.SERIAL_NUMBER = serialNum\n self.NUMBER_OF_FREQEUNCIES = numOfFreq\n self.VALID_FROM = validFrom\n self.VALID_UNTIL = validUntil\n self.FREQUENCY_LIST = freqList\n" }, { "alpha_fraction": 0.47371482849121094, "alphanum_fraction": 0.4831998646259308, "avg_line_length": 49.567874908447266, "blob_id": "5c395b7b09296b63a1b79714ff2e87213a236cab", "content_id": "d7bfbf89d5c7cb6dc142bb7798fb5e0e3dd6f984", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22351, "license_type": "no_license", "max_line_length": 93, "num_lines": 442, "path": 
"/observationFilesDecoderModule/observationFilesDecoder.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nfrom kpi.helper.common import Common\nfrom kpi.data.observationRecords import Observationrecords\nfrom kpi.data.observationsDataHeader import ObservationsDataHeader\nfrom kpi.data.stationObservationsData import StationObservationsData\nfrom kpi.data.observationPrnIdAndRecords import ObservationPrnIdAndRecords\nfrom kpi.data.positionVectorXYZ import PositionVectorXYZ\nfrom kpi.data.antennaNorthEastUp import AntennaNorthEastUp\nfrom kpi.data.antennaPhaseCenter import AntennaPhaseCenter\nfrom kpi.data.sysObservationTypes import SysObservationTypes\nfrom kpi.data.sysCorrectionApplied import SysCorrectionApplied\nfrom kpi.data.sysScaleFactor import SysScaleFactor\nfrom kpi.data.sysPhaseShift import SysPhaseShift\nfrom kpi.data.glonassSlotFreqNumbers import GlonassSlotFreqNumbers\nfrom kpi.data.satellitesFrequencies import SatellitesFrequencies\nfrom kpi.data.glonassCodPhsBis import GlonassCodPhsBis\nfrom kpi.data.leapSeconds import LeapSeconds\nfrom kpi.data.prnAndNumOfObs import PrnAndNumOfObs\n\n\nclass ObservationFilesDecoder:\n \"\"\"\n Reads observation files in RINEX 3 Observables format files listed\n and transforms them into an internal variable. The format of RINEX 3\n Observables files is described in Table A2 and A3 in [1]. This module\n shall only output observation data for the corresponding Galileo\n satellites listed in prn_id\n \"\"\"\n\n log = Common.getLogger(__name__)\n\n def observationFilesDecoder(self, observablesFiles, observablesList,\n prnId):\n \"\"\"\n For each of the file names contained in Observables_list, this\n modules needs to parse the data contained in each individual\n file and return the corresponding header and observations\n corresponding to the satellites contained in prn_id. 
All this\n information must be properly stored and output.\n\n Note that each record has an associate time tag in calendar\n format that needs to be consistently stored\n\n Parameters:\n Observables files - Observables files in RINEX 3 format that need\n to be parsed\n Observables_list - Contains the list of names of all observables\n files in RINEX 3 format that need to be parsed.\n prn_id - List of satellites id to be included in the\n computation\n Return:\n Station Observations data - Contains daily observations of Galileo\n satellites for the listed stations in\n Observables_list.\n \"\"\"\n\n self.log.info(\"Start Observation Files Decoder\")\n stationObservationData = []\n observationData = StationObservationsData()\n for observableFile in observablesFiles:\n filePath = observableFile.split(\"/\")\n if filePath[len(filePath) - 1] in observablesList:\n observationData.OBSERVABLES_FILE_NAME = filePath[len(filePath) - 1]\n observationData.OBSERVABLES_HEADER = (self.extractObservationsFileHeaderData(\n observableFile))\n observationData.OBSERVATION_RECORDS = (\n self.getObservationRecords(observableFile, prnId))\n stationObservationData.append(observationData)\n return stationObservationData\n\n def getObservationRecords(self, observationFile, prnId):\n \"\"\"\n Get Timetagcals , PNRID and Observation Records\n \"\"\"\n allObsRecData = []\n\n obsRecData = open(observationFile)\n if obsRecData is None:\n self.log.error(\"Observable File %s was not found.\",\n observationFile)\n else:\n for obsRecLine in obsRecData:\n if \"END OF HEADER\" in obsRecLine:\n break\n pnrRecordListPerTimeTagCal = []\n obsRecObj = Observationrecords()\n lastObsRecObj = Observationrecords()\n for obsRecLine in obsRecData:\n if \">\" not in obsRecLine:\n pnrAndRec = ObservationPrnIdAndRecords()\n if obsRecLine[0:3] in prnId:\n pnrAndRec.PRN_ID = obsRecLine[0:3]\n pnrAndRec.OBSERVATION_RECORDS = (\n obsRecLine.split()[1:len(obsRecLine.split())])\n pnrRecordListPerTimeTagCal.append(pnrAndRec)\n obsRecObj.OBSERVATION_PRNID_AND_RECORDS = pnrRecordListPerTimeTagCal\n lastObsRecObj = obsRecObj\n elif \">\" in obsRecLine:\n if obsRecObj.TIME_TAG_CAL.strip():\n allObsRecData.append(obsRecObj)\n obsRecObj = Observationrecords()\n obsRecObj.TIME_TAG_CAL = obsRecLine[1:40]\n pnrRecordListPerTimeTagCal = []\n # Data from last object\n allObsRecData.append(lastObsRecObj)\n obsRecData.close()\n return allObsRecData\n\n def extractObservationsFileHeaderData(self, observationFile):\n \"\"\"\n Extract the header data from given observationFile\n \"\"\"\n headerData = ObservationsDataHeader()\n header = open(observationFile)\n if header is None:\n self.log.error(\"Observable File %s was not found.\",\n observationFile)\n raise Exception(\n \"Observable File %s was not found\")\n else:\n for headerLine in header:\n if \"END OF HEADER\" in headerLine:\n break\n header.close()\n else:\n if \"RINEX VERSION / TYPE\" in headerLine:\n versionTyp = headerLine.split()\n headerData.RINEX_VERSION = versionTyp[0]\n headerData.RINEX_TYPE = versionTyp[1]\n headerData.SATELLITE_SYSTEM = versionTyp[2]\n # Goto nextline\n headerLine = next(header)\n\n if \"PGM / RUN BY / DATE\" in headerLine:\n headerData.PROGRAM_CREATING_FILE = headerLine[0:20]\n headerData.AGENCY_CREATING_FILE = headerLine[20:40]\n headerData.FILE_CREATION_DATE = headerLine[40:60]\n # Goto nextline\n headerLine = next(header)\n\n if \"MARKER NAME\" in headerLine:\n headerData.MARKER_NAME = headerLine[0:60]\n # Goto nextline\n headerLine = next(header)\n\n if \"MARKER NUMBER\" in 
headerLine:\n headerData.MARKER_NUMBER = headerLine[0:60]\n # Goto nextline\n headerLine = next(header)\n\n if \"MARKER TYPE\" in headerLine:\n headerData.MARKER_TYPE = headerLine[0:20]\n # Goto nextline\n headerLine = next(header)\n\n if \"OBSERVER / AGENCY\" in headerLine:\n headerData.OBSERVER_NAME = headerLine[0:20]\n headerData.AGENCY_NAME = headerLine[20:40]\n # Goto nextline\n headerLine = next(header)\n\n if \"REC # / TYPE / VERS\" in headerLine:\n headerData.RECEIVER_NUMBER = headerLine[0:20]\n headerData.RECEIVER_TYPE = headerLine[20:40]\n headerData.RECEVIER_VERSION = headerLine[40:60]\n # Goto nextline\n headerLine = next(header)\n\n if \"ANT # / TYPE\" in headerLine:\n headerData.ANTENNA_NUMBER = headerLine[0:20]\n headerData.ANTENNA_TYPE = headerLine[20:40]\n # Goto nextline\n headerLine = next(header)\n\n if \"APPROX POSITION XYZ\" in headerLine:\n position = headerLine.split()\n positionVextorXyz = PositionVectorXYZ(\n position[0], position[1], position[2])\n headerData.APPROX_MARKER_POSITION = positionVextorXyz\n # Goto nextline\n headerLine = next(header)\n\n if \"ANTENNA: DELTA H/E/N\" in headerLine:\n heightEastNorth = headerLine.split()\n headerData.ANTENNA_DELTA_HEN = AntennaNorthEastUp(\n heightEastNorth[2], heightEastNorth[1],\n heightEastNorth[0])\n # Goto nextline\n headerLine = next(header)\n\n if \"ANTENNA: DELTA X/Y/Z\" in headerLine:\n deltaXYZ = headerLine.split()\n headerData.ANTENNA_DELTA_XYZ = PositionVectorXYZ(\n deltaXYZ[0], deltaXYZ[1],\n deltaXYZ[2])\n # Goto nextline\n headerLine = next(header)\n\n if \"ANTENNA:PHASECENTER\" in headerLine:\n splitData = headerLine.split()\n phCentXYZ = PositionVectorXYZ(\n splitData[2], splitData[3],\n splitData[4])\n phCentNEU = AntennaNorthEastUp(\n splitData[2], splitData[3],\n splitData[4])\n headerData.ANTENNA_PHASE_CENTER = AntennaPhaseCenter\n (splitData[0], splitData[1], phCentNEU, phCentXYZ)\n # Goto nextline\n headerLine = next(header)\n\n if \"ANTENNA: B.SIGHT XYZ\" in headerLine:\n sightXYZ = headerLine.split()\n headerData.ANTENNA_B_SIGHT_XYZ = AntennaNorthEastUp(\n sightXYZ[0], sightXYZ[1], sightXYZ[2])\n # Goto nextline\n headerLine = next(header)\n\n if \"ANTENNA: ZERODIR AZI\" in headerLine:\n headerData.ANTENNA_ZERODIR_AZI = headerLine[0:20]\n # Goto nextline\n headerLine = next(header)\n\n if \"ANTENNA: ZERODIR XYZ\" in headerLine:\n zeroDirXYZ = headerLine.split()\n headerData.ANTENNA_B_SIGHT_XYZ = AntennaNorthEastUp(\n zeroDirXYZ[0], zeroDirXYZ[1], zeroDirXYZ[2])\n # Goto nextline\n headerLine = next(header)\n\n if \"CENTER OF MASS: XYZ\" in headerLine:\n comXYZ = headerLine.split()\n headerData.CENTER_OF_MASS_XYZ = PositionVectorXYZ(\n comXYZ[0], comXYZ[1],\n comXYZ[2])\n # Goto nextline\n headerLine = next(header)\n\n if ((\"SYS / # / OBS TYPES\" in headerLine) and\n (len(headerData.SYS_NUMBER_OBS_TYPES) == 0)):\n sysObservationTypeData = []\n observationDescriptors = []\n # Loop over all Satellite System Codes\n while \"SYS / # / OBS TYPES\" in headerLine:\n\n sysObsTypes = SysObservationTypes()\n\n observationDescriptors = []\n if headerLine[0:5].strip():\n sysObsData = headerLine[0:59].split()\n sysObsTypes.SATELLITE_SYSTEM_CODE = sysObsData[\n 0]\n sysObsTypes.NUMBER_OF_OBSERVATION_TYPES = sysObsData[\n 1]\n observationDescriptors = sysObsData[2:15]\n headerLine = next(header)\n # More than 13 observation descriptors are in\n # next lines\n while not headerLine[0:5].strip():\n observationDescriptors = (\n observationDescriptors + headerLine[5:59].split())\n headerLine = next(header)\n 
sysObsTypes.OBSERVATION_DESCRIPTORS = observationDescriptors\n observationDescriptors = []\n sysObservationTypeData.append(sysObsTypes)\n headerData.SYS_NUMBER_OBS_TYPES = sysObservationTypeData\n\n if ((\"SYS / PHASE SHIFT\" in headerLine) and\n (len(headerData.SYS_PHASE_SHIFTS) == 0)):\n sysPhaseShiftData = []\n satelliteList = []\n while \"SYS / PHASE SHIFT\" in headerLine:\n phaseShift = SysPhaseShift()\n satelliteList = []\n if headerLine[0:16].strip():\n sysPhaseData = headerLine[0:59].split()\n phaseShift.SATELLITE_SYSTEM = sysPhaseData[\n 0]\n phaseShift.CARRIER_PHASE_OBSERVATION_CODE = sysPhaseData[\n 1]\n phaseShift.CORRECTION_APPLIED = sysPhaseData[2]\n phaseShift.NUMBER_OF_SATELLITES = headerLine[\n 15:19]\n satelliteList = headerLine[20:59].split()\n headerLine = next(header)\n # More than 13 observation descriptors are in\n # next lines\n while not headerLine[0:16].strip():\n satelliteList = (\n satelliteList + headerLine[16:59].split())\n headerLine = next(header)\n phaseShift.LIST_OF_SATELLITES = satelliteList\n satelliteList = []\n sysPhaseShiftData.append(phaseShift)\n headerData.SYS_PHASE_SHIFTS = sysPhaseShiftData\n\n if \"SIGNAL STRENGTH UNIT\" in headerLine:\n headerData.SIGNAL_STRENGTH_UNIT = headerLine[0:20]\n # Goto nextline\n headerLine = next(header)\n\n if \"INTERVAL\" in headerLine:\n headerData.INTERVAL = headerLine.split()[0]\n # Goto nextline\n headerLine = next(header)\n\n if \"TIME OF FIRST OBS\" in headerLine:\n headerData.TIME_OF_FIRST_OBS = headerLine[0:52]\n # Goto nextline\n headerLine = next(header)\n\n if \"TIME OF LAST OBS\" in headerLine:\n headerData.TIME_OF_LAST_OBS = headerLine[0:52]\n # Goto nextline\n headerLine = next(header)\n\n if \"RCV CLOCK OFFS APPL\" in headerLine:\n headerData.RCV_CLOCK_OFFS_APPL = headerLine.split()[0]\n # Goto nextline\n headerLine = next(header)\n\n if \"SYS / DCBS APPLIED\" in headerLine:\n dcbsCorrection = SysCorrectionApplied()\n dcbsCorrection.SATELLITE_SYSTEM = headerLine.split()[0]\n dcbsCorrection.CORRECTION_PROGRAM = headerLine.split()[\n 1]\n dcbsCorrection.SOURCE_OF_CORRECTION = headerLine.split()[\n 2]\n headerData.SYS_DCBS_APPLIED = dcbsCorrection\n # Goto nextline\n headerLine = next(header)\n\n if \"SYS / PCVS APPLIED\" in headerLine:\n pcvsCorrection = SysCorrectionApplied()\n pcvsCorrection.SATELLITE_SYSTEM = headerLine.split()[0]\n pcvsCorrection.CORRECTION_PROGRAM = headerLine.split()[\n 1]\n pcvsCorrection.SOURCE_OF_CORRECTION = headerLine.split()[\n 2]\n headerData.SYS_PCVS_APPLIED = pcvsCorrection\n # Goto nextline\n headerLine = next(header)\n\n if ((\"SYS / SCALE FACTOR\" in headerLine) and\n (len(headerData.SYS_SCALE_FACTOR) == 0)):\n sysScaleFactorData = []\n observationTypes = []\n # Loop over all Satellite System Codes\n while \"SYS / SCALE FACTOR\" in headerLine:\n sysObsTypes = SysScaleFactor()\n observationTypes = []\n if headerLine[0:8].strip():\n sysObsData = headerLine[0:59].split()\n sysObsTypes.SATELLITE_SYSTEM = sysObsData[\n 0]\n sysObsTypes.DIVIDING_FACTOR = sysObsData[\n 1]\n sysObsTypes.NUM_OBSERVATION_TYPES = headerLine[\n 6:9]\n observationTypes = sysObsData[2:15]\n headerLine = next(header)\n # More than 13 observation descriptors are in\n # next lines\n while not headerLine[0:5].strip():\n observationTypes = (\n observationTypes + headerLine[10:59].split())\n headerLine = next(header)\n sysObsTypes.OBSERVATION_TYPE_LIST = observationTypes\n observationTypes = []\n sysScaleFactorData.append(sysObsTypes)\n headerData.SYS_SCALE_FACTOR = sysScaleFactorData\n\n if \" OF 
SATELLITES\" in headerLine:\n headerData.NUMBER_OF_SATELLITES = headerLine.split()[0]\n\n if ((\"GLONASS SLOT / FRQ\" in headerLine) and\n (len(headerData.GLONASS_SLOT_FREQ_NUMBERS) == 0)):\n satFreqList = []\n glonassData = GlonassSlotFreqNumbers()\n glonassData.NUMBER_OF_SATELLITES = headerLine[1:4]\n while \"GLONASS SLOT / FRQ\" in headerLine:\n satFreqList = satFreqList + \\\n headerLine[4:59].split()\n headerLine = next(header)\n count = 0\n satFreqCount = int(len(satFreqList) / 2)\n satFreqObjList = []\n while (count < satFreqCount):\n satObj = SatellitesFrequencies()\n satObj.SATELLITE_NUMBER = satFreqList[count * 2]\n satObj.FREQUENCY_NUMBER = satFreqList[\n count * 2 + 1]\n satFreqObjList.append(satObj)\n count = count + 1\n glonassData.LIST_OF_SATELLITES_FREQUENCIES = satFreqObjList\n headerData.GLONASS_SLOT_FREQ_NUMBERS = glonassData\n\n if \"GLONASS COD/PHS/BIS\" in headerLine:\n glonassDataList = []\n glonassData = headerLine[0:60].split()\n\n count = 0\n dataCount = int(len(glonassData) / 2)\n while count < dataCount:\n glonassObj = GlonassCodPhsBis()\n glonassObj.GLONASS_SIGNAL_IDENTIFIER = glonassData[\n count * 2]\n glonassObj.CODE_PHASE_BIAS_CORRECTION = glonassData[\n count * 2 + 1]\n glonassDataList.append(glonassObj)\n count = count + 1\n headerData.GLONASS_COD_PHS_BIS = glonassDataList\n\n if \"LEAP SECONDS\" in headerLine:\n leapSeconds = LeapSeconds()\n leapSeconds.CURRENT_NUM_OF_LEAP_SECONDS = headerLine.split()[\n 0]\n leapSeconds.FUTURE_OR_PAST_LEAP_SECONDS = headerLine.split()[\n 1]\n leapSeconds.WEEK_NUMBER = headerLine.split()[2]\n leapSeconds.DAY_NUMBER = headerLine.split()[3]\n leapSeconds.TIME_SYSTEM_IDENTIFIER = headerLine.split()[\n 4]\n headerData.LEAP_SECONDS = leapSeconds\n # Goto nextline\n headerLine = next(header)\n\n if \"PRN / # OF OBS\" in headerLine:\n prnObs = PrnAndNumOfObs()\n prnObs.SATELLITE_NUMBERS = headerLine.split()[0]\n obsList = headerLine[0:59].split()\n headerLine = next(header)\n while \"PRN / # OF OBS\" in headerLine:\n obsList = obsList + headerLine[0:59].split()\n headerLine = next(header)\n prnObs.NUM_OF_OBS_PER_OBS_TYPE = obsList\n headerData.PRN_AND_NUM_OF_OBS = prnObs\n\n header.close()\n return headerData\n" }, { "alpha_fraction": 0.7228915691375732, "alphanum_fraction": 0.7228915691375732, "avg_line_length": 82, "blob_id": "458e624432d6072ce46023719a7190b62fd00964", "content_id": "46f867ee3006e1b5e0f5db4ac345a8f7ec7afd02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 83, "license_type": "no_license", "max_line_length": 82, "num_lines": 1, "path": "/nose.sh", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "nosetests --all-modules -vv --exe --with-coverage --cover-package=kpi --cover-html\n" }, { "alpha_fraction": 0.6976743936538696, "alphanum_fraction": 0.6976743936538696, "avg_line_length": 20.5, "blob_id": "9a76a6fd02fbaaab27be198810d13a2d5a346020", "content_id": "db4ca7c953b1f0b1d1626efd8bdd23ca5978f1aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 43, "license_type": "no_license", "max_line_length": 22, "num_lines": 2, "path": "/__init__.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "# export kpi as module\n# from kpi import *\n" }, { "alpha_fraction": 0.5188488364219666, "alphanum_fraction": 0.5234954953193665, "avg_line_length": 54.1363639831543, "blob_id": "4a9b9852d3202c41930419ffb968ae81905da632", "content_id": "de5500c23c9d799cb95a44bfb714afd7fcdfdf47", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13348, "license_type": "no_license", "max_line_length": 79, "num_lines": 242, "path": "/data/observationsDataHeader.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass ObservationsDataHeader:\n \"\"\"\n Author: Bhaskar Shankar (IDV-AG)\n Date : 05 Sept 2016\n\n MODULE: 5.6 Observation files decoder module\n\n Class Variable for StationObservationsData - OBSERVABLES_HEADER\n\n Description:\n Header model for observation files in RINEX 3 Observables\n format.\n\n Parameters:\n RINEX_VERSION - Format version : 3.03\n RINEX_TYPE - File type: O for Observation Data\n SATELLITE_SYSTEM - Satellite System:\n G: GPS, R: GLONASS, E: Galileo, J: QZSS\n C: BDS, I: IRNSS ,S: SBAS payload ,M: Mixed\n PROGRAM_CREATING_FILE- Name of program creating current file\n AGENCY_CREATING_FILE - Name of agency creating current file\n FILE_CREATION_DATE - Date and time of file creation\n MARKER_NAME - Name of antenna marker\n MARKER_NUMBER - Number of the antenna marker\n (optional)\n MARKER_TYPE - Type of the marker\n OBSERVER_NAME - Name of observer\n AGENCY_NAME - Name of agency\n RECEIVER_NUMBER - Receiver number\n RECEIVER_TYPE - Receiver type\n RECEVIER_VERSION - Receiver version\n ANTENNA_NUMBER - Antenna number\n ANTENNA_TYPE - Antenna type\n APPROX_MARKER_POSITION - (Class Ref: PositionVectorXYZ)\n Geocentric approximate marker position\n (Units: Meters, System: ITRS recommended)\n Optional for moving platforms\n ANTENNA_DELTA_HEN - (Class Ref: AntennaNorthEastUp)\n - Antenna height: Height of the antenna\n reference point (ARP) above the marker\n - Horizontal eccentricity of ARP relative to the\n marker (east/north)\n All units in meters\n ANTENNA_DELTA_XYZ - (Class Ref: PositionVectorXYZ)\n (optional) Position of antenna reference point for antenna\n on vehicle (m): XYZ vector in body-fixed\n coord. system\n ANTENNA_PHASE_CENTER - (Class Ref: AntennaPhaseCenter)\n (optional) Average phase center position w/r to antenna\n reference point (m)\n - Satellite system (G/R/E/J/C/I/S)\n - Observation code\n - North/East/Up (fixed station) or\n - X/Y/Z in body-fixed system (vehicle)\n ANTENNA_B_SIGHT_XYZ - (Class Ref: AntennaNorthEastUp)\n (optional) - Direction of the “vertical” antenna axis\n towards the GNSS satellites.\n Antenna on vehicle: Unit vector in\n body-fixed coordinate system.\n Tilted antenna on fixed station: Unit vector in\n N/E/Up left-handed system.\n ANTENNA_ZERODIR_AZI - Azimuth of the zero-direction of a fixed\n (optional) antenna (degrees, from north)\n ANTENNA_ZERODIR_XYZ - (Class Ref: AntennaNorthEastUp)\n (optional) Zero-direction of antenna Antenna on vehicle:\n Unit vector in body-fixed coordinate system\n Tilted antenna on fixed station: Unit vector in\n N/E/Up left-handed system\n CENTER_OF_MASS_XYZ - (Class Ref: PositionVectorXYZ)\n (optional) - Current center of mass (X,Y,Z, meters) of\n vehicle in body-fixed coordinate system. 
Same\n system as used for attitude.\n SYS_NUMBER_OBS_TYPES - (Class Ref: SystemObservationTypes)\n - Satellite system code (G/R/E/J/C/I/S)\n - Number of different observation types for the\n - Observation descriptors\n SIGNAL_STRENGTH_UNIT - Unit of the carrier to noise ratio observables\n (optional) Snn (if present) DBHZ : S/N given in dbHz\n\n INTERVAL (optional) - Observation interval in seconds\n\n TIME_OF_FIRST_OBS - Time of first observation record (4-digit-year,\n (optional) month, day, hour, min, sec)\n\n TIME_OF_LAST_OBS - Time of last observation record (4-digit-year,\n (optional) month,day,hour,min,sec)\n\n RCV_CLOCK_OFFS_APPL - Epoch, code, and phase are corrected by\n (optional) applying the realtime-derived receiver clock\n offset: 1=yes, 0=no; default: 0=no Record\n required if clock offsets are reported in the\n EPOCH/SAT records\n\n SYS_DCBS_APPLIED - (Class Ref: SysCorrectionApplied)\n (optional) - Satellite system (G/R/E/J/C/I/S)\n - Program name used to apply differential code\n bias corrections\n - Source of corrections (URL)\n Repeat for each satellite system.\n No corrections applied:Blank fields or record\n not present.\n SYS_PCVS_APPLIED - (Class Ref: SysCorrectionApplied )\n (optional) - Satellite system (G/R/E/J/C/I/S)\n - Program name used to apply phase center\n variation corrections\n - Source of corrections (URL)\n Repeat for each satellite system.\n No corrections applied: Blank fields or\n record not present.\n SYS_SCALE_FACTOR - (Class Ref: SysScaleFactor)\n (optional) - Satellite system (G/R/E/J/C/I/S)\n - Factor to divide stored observations with\n before use (1,10,100,1000)\n\n SYS_PHASE_SHIFTS - (Class Ref: (List) SysPhaseShift)\n Phase shift correction used to generate phases\n consistent w/r to cycle shifts S\n - Satellite system (G/R/E/J/C/I/S)\n - Carrier phase observation code:\n - Correction applied (cycles)\n - List of satellites\n - Number of satellites involved 0 or blank:\n All satellites of system\n Repeat the record for all affected codes.\n\n GLONASS_SLOT_FREQ_NUMBERS - (Class Ref: GlonassSlotFreqNumbers)\n GLONASS slot and frequency numbers\n - Number of satellites in list\n List of :\n - Satellite numbers (system code, slot)\n - Frequency numbers (-7...+6)\n GLONASS_COD_PHS_BIS - (Class Ref: GlonassCodPhsBis)\n GLONASS Phase bias correction used to align\n code and phase observations.\n GLONASS signal identifier : C1C , C2C, C1P, C2P\n and Code Phase bias correction (metres)\n LEAP_SECONDS - (Class Ref: LeapSeconds)\n (optional) - Current Number of leap seconds\n Future or past leap seconds ΔtLSF(BNK) , i.e.\n future leap second if the week and day number\n are in the future.\n - Respective week number WN_LSF\n (continuous number) (BNK). For GPS, GAL,\n QZS and IRN, weeks since 6-Jan-1980. When\n BDS only file leap seconds specified, weeks\n since 1-Jan-2006.\n - Respective day number DN (0-6) BeiDou and\n (1-7) for GPS and others constellations,\n (BNK). The day number is the GPS or BeiDou\n day before the leap second.\n In the case of the Tuesday, June 30/2015 (GPS\n Week 1851, DN 3) the UTC leap second actually\n occurred 16 seconds into the next GPS\n day.\n - Time system identifier: only GPS and BDS are\n valid identifiers. 
Blank defaults to GPS see\n Notes section below.\n NUMBER_OF_SATELLITES - Number of satellites, for which observations\n (optional) are stored in the file\n \n PRN_AND_NUM_OF_OBS - (Class Ref: (List) PrnAndNumOfObs)\n (optional) Satellite numbers, number of observations\n for each observation type indicated in the\n \"SYS / # / OBS TYPES\" record\n \"\"\"\n\n def __init__(self, rinexVersion=\"\", rinex_type=\"\", satelliteSystem=\"\",\n programCreatingFile=\"\", agencyCreatingFile=\"\",\n fileCreationDate=\"\", markerName=\"\", markerNumber=\"\",\n markerType=\"\", observerName=\"\", agencyName=\"\",\n receiverNumber=\"\", receiverType=\"\", recevierVersion=\"\",\n antennaNumber=\"\", antennaType=\"\", approxMarkerPosition=\"\",\n antennaDeltaHen=\"\", antennaDeltaXyz=\"\", antennaPhaseCenter=\"\",\n antennaBSightXyz=\"\", antennaZerodirAzi=\"\",\n antennaZerodirXyz=\"\", centerOfMassXyz=\"\",\n sysNumberObsTypes=\"\", signalStrengthUnit=\"\",\n interval=\"\", timeOfFirstObs=\"\", timeOfLastObs=\"\",\n rcvClockOffsAppl=\"\", sysDcbsApplied=\"\", sysPcvsApplied=\"\",\n sysScaleFactor=\"\", sysPhaseShifts=\"\",\n glonassSlotFreqNumbers=\"\", glonassCodPhsBis=\"\",\n leapSeconds=\"\", numberOfSatellites=\"\",\n prnAndNumOfObs=\"\"):\n self.RINEX_VERSION = rinexVersion\n self.RINEX_TYPE = rinex_type\n self.SATELLITE_SYSTEM = satelliteSystem\n self.PROGRAM_CREATING_FILE = programCreatingFile\n self.AGENCY_CREATING_FILE = agencyCreatingFile\n self.FILE_CREATION_DATE = fileCreationDate\n self.MARKER_NAME = markerName\n self.MARKER_NUMBER = markerNumber\n self.MARKER_TYPE = markerType\n self.OBSERVER_NAME = observerName\n self.AGENCY_NAME = agencyName\n self.RECEIVER_NUMBER = receiverNumber\n self.RECEIVER_TYPE = receiverType\n self.RECEVIER_VERSION = recevierVersion\n self.ANTENNA_NUMBER = antennaNumber\n self.ANTENNA_TYPE = antennaType\n # Class Ref: PositionVectorXYZ\n self.APPROX_MARKER_POSITION = approxMarkerPosition\n # Class Ref: AntennaNorthEastUp\n self.ANTENNA_DELTA_HEN = antennaDeltaHen\n # Class Ref: PositionVectorXYZ\n self.ANTENNA_DELTA_XYZ = antennaDeltaXyz\n # Class Ref: AntennaPhaseCenter\n self.ANTENNA_PHASE_CENTER = antennaPhaseCenter\n # Class Ref: AntennaNorthEastUp\n self.ANTENNA_B_SIGHT_XYZ = antennaBSightXyz\n self.ANTENNA_ZERODIR_AZI = antennaZerodirAzi\n # Class Ref: AntennaNorthEastUp\n self.ANTENNA_ZERODIR_XYZ = antennaZerodirXyz\n # Class Ref: PositionVectorXYZ\n self.CENTER_OF_MASS_XYZ = centerOfMassXyz\n # Class Ref: SystemObservationTypes\n self.SYS_NUMBER_OBS_TYPES = sysNumberObsTypes\n self.SIGNAL_STRENGTH_UNIT = signalStrengthUnit\n self.INTERVAL = interval\n self.TIME_OF_FIRST_OBS = timeOfFirstObs\n self.TIME_OF_LAST_OBS = timeOfLastObs\n # required if clock offsets are reported in the EPOCH/SAT records\n self.RCV_CLOCK_OFFS_APPL = rcvClockOffsAppl\n # Class Ref: SysCorrectionApplied\n self.SYS_DCBS_APPLIED = sysDcbsApplied\n # Class Ref: SysCorrectionApplied\n self.SYS_PCVS_APPLIED = sysPcvsApplied\n # Class Ref: SysScaleFactor\n self.SYS_SCALE_FACTOR = sysScaleFactor\n # Class Ref: (List) SysPhaseShift\n self.SYS_PHASE_SHIFTS = sysPhaseShifts\n # Class Ref: GlonassSlotFreqNumbers\n self.GLONASS_SLOT_FREQ_NUMBERS = glonassSlotFreqNumbers\n # Class Ref: GlonassCodPhsBis\n self.GLONASS_COD_PHS_BIS = glonassCodPhsBis\n # Class Ref: LeapSeconds\n self.LEAP_SECONDS = leapSeconds\n self.NUMBER_OF_SATELLITES = numberOfSatellites\n # Class Ref: (List) PrnAndNumOfOb\n self.PRN_AND_NUM_OF_OBS = prnAndNumOfObs\n" }, { "alpha_fraction": 0.4600180685520172, 
"alphanum_fraction": 0.6168355345726013, "avg_line_length": 39.51626205444336, "blob_id": "011a43a4fb9ca23cca5e325ad3627696e9857847", "content_id": "7c36f32a66a30354a791ab6955e95e4ea1a63d22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9967, "license_type": "no_license", "max_line_length": 100, "num_lines": 246, "path": "/test/satellitePVTHealthEvaluationTest.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "import unittest\nfrom nose.tools import *\nfrom decimal import *\nimport sys\nfrom kpi.broadcastnavigationdecoder.satellitePVTHealthEvaluation import SatellitePVTHealthEvaluation\nfrom kpi.data.navDataModel import NavDataModel\nfrom kpi.data.svNavPVT import SvNavPVT\nfrom kpi.helper.kpiConstants import KpiConstants\n\n\nclass TestSatellitePVTHealthEvaluation(unittest.TestCase):\n \"\"\"\n Notice! All these test have to recheck when calculation data of dlr is\n received. So all these tests are pseudo tests. We are not right if this\n calculations are right\n \"\"\"\n\n def setUp(self):\n self.classToTest = SatellitePVTHealthEvaluation()\n self.bceList = [\n \"test/resources/broadcastnavigationdecoder/testbrdm1220.16p\"]\n self.timeTagGpsList = []\n # (1895, 3600 * 7)]\n for x in range(1, 24):\n self.timeTagGpsList.append((1895, 3600 * x))\n self.prnId = [\"E08\", \"E11\"]\n self.service = \"E1,E5a,E5b\"\n self.aodMax = 3\n\n def testHealthEvaluationSuccess(self):\n dataset = self.getTestDatasets()\n health = self.classToTest.healthEvaluation(\n dataset[0], dataset[4], self.prnId, self.timeTagGpsList,\n self.service)\n assert health[0][1].SIS_HS == \"0\"\n assert health[0][1].SISA_FLAG_STATUS == \"not NAPA\"\n\n def testHealthEvaluationNAPA(self):\n dataset = self.getTestDatasets()\n health = self.classToTest.healthEvaluation(\n dataset[1], dataset[5], self.prnId, self.timeTagGpsList,\n self.service)\n assert health[0][1].SIS_HS == \"1\"\n assert health[0][1].SISA_FLAG_STATUS == \"2.550000000000e+02\"\n\n def testHealhEvaluationUnhealthy(self):\n service = \"E1,E5a,E5b,E1-E5a,E1-E5b\"\n inav, fnav = self.createHealthTestData(127.0, 0.0)\n health = self.classToTest.healthEvaluation(\n inav, fnav, self.prnId, self.timeTagGpsList, service)\n assert health[0][1].SIS_HS == \"1\"\n assert health[1][1].SIS_HS == \"0\"\n assert health[2][1].SIS_HS == \"1\"\n assert health[3][1].SIS_HS == \"1\"\n assert health[4][1].SIS_HS == \"1\"\n inav, fnav = self.createHealthTestData(0.0, 127.0)\n health = self.classToTest.healthEvaluation(\n inav, fnav, self.prnId, self.timeTagGpsList, service)\n assert health[0][1].SIS_HS == \"0\"\n assert health[1][1].SIS_HS == \"1\"\n assert health[2][1].SIS_HS == \"0\"\n assert health[3][1].SIS_HS == \"1\"\n assert health[4][1].SIS_HS == \"0\"\n inav, fnav = self.createHealthTestData(127.0, 127.0)\n health = self.classToTest.healthEvaluation(\n inav, fnav, self.prnId, self.timeTagGpsList, service)\n assert health[0][1].SIS_HS == \"1\"\n assert health[1][1].SIS_HS == \"1\"\n assert health[2][1].SIS_HS == \"1\"\n assert health[3][1].SIS_HS == \"1\"\n assert health[4][1].SIS_HS == \"1\"\n\n # TODO, wait for reference data to make right tests\n def testPseudoPositionAndVelocity(self):\n dataset = self.getTestDatasets()\n navPvt = SvNavPVT()\n self.classToTest.calculatePosition(\n dataset[0], self.timeTagGpsList[1], navPvt)\n assert navPvt.X_nav == -18263558.253876142\n assert navPvt.Y_nav == -4153009.915745916\n assert navPvt.Z_nav == -22912592.007180363\n assert navPvt.Vx_nav == 
2593.3532854882274\n assert navPvt.Vy_nav == -699.2158317957653\n assert navPvt.Vz_nav == 978.7841593834917\n\n # TODO, wait for reference data to make right tests\n def testPseudoDatasetSelection(self):\n inavDatasets = [self.getTestDatasets()[0]]\n inavDatasets.append(self.getTestDatasets()[1])\n inavSet = [(\"E08\", inavDatasets)]\n fnavDatasets = [self.getTestDatasets()[4]]\n fnavDatasets.append(self.getTestDatasets()[5])\n fnavSet = [(\"E08\", fnavDatasets)]\n inav, fnav = self.classToTest.dataSetSelection(\n self.timeTagGpsList[1], inavSet, fnavSet, 3)\n assert inav.TIME_TAG == self.getTestDatasets()[0].TIME_TAG\n assert fnav.TIME_TAG == self.getTestDatasets()[5].TIME_TAG\n\n # TODO, wait for reference data to make right tests\n def testPseudoClockBiasRate(self):\n dataset = self.getTestDatasets()\n tsvInav, tsvE1, tsvE2Inav = self.classToTest.calculateClockBias(\n dataset[0], self.timeTagGpsList[1],\n KpiConstants.fE5b, True)\n assert tsvInav == 0.0018134631245634372\n assert tsvE1 == 0.0018134647799626826\n assert tsvE2Inav == 0.001813465944115901\n tsvFnav, tsvE2Fnav = self.classToTest.calculateClockBias(\n dataset[0], self.timeTagGpsList[1],\n KpiConstants.fE5a, False)\n assert tsvE2Fnav != tsvE2Inav\n assert tsvE2Fnav == 0.0018134657562532404\n assert tsvInav == tsvFnav\n\n def getTestDatasets(self):\n dataSet = []\n # INAV success 0\n model = NavDataModel()\n model.PRN_ID = \"E08\"\n model.TIME_TAG = \"2016 05 01 00 00 00\"\n model.SV_CLOCK_BIAS = 1.810591667891e-03\n model.SV_CLOCK_DRIFT = 3.988134267274e-10\n model.SV_CLOCK_DRIFT_RATE = 0.000000000000e+00\n model.IOD_NAV = 0.000000000000e+00\n model.CRS = 6.662500000000e+01\n model.DELTA_N = 3.311566511466e-09\n model.M_0 = -1.122746861816e+00\n model.CUC = 3.136694431305e-06\n model.ECCENTRICITY = 2.200541784987e-04\n model.CUS = 7.826834917068e-06\n model.TOE = 0.000000000000e+00\n model.SQRT_A = 5.440608263016e+03\n model.CIC = 6.891787052155e-08\n model.OMEGA_0 = -2.817124155957e-01\n model.CIS = 8.195638656616e-08\n model.I_0 = 9.594287808826e-01\n model.CRC = 1.721562500000e+02\n model.OMEGA = -1.672201682621e+00\n model.OMEGA_DOT = -5.717738166745e-09\n model.IDOT = 2.542963067476e-10\n model.DATA_SOURCES = 513.0000000000\n model.GAL_WEEK = 1.895000000000e+03\n model.SISA_SIGNAL = 3.120000000000e+00\n model.BGD_E5_A = -7.450580596924e-09\n model.BGD_E5_B = -8.614733815193e-09\n model.TTOM = 6.850000000000e+02\n model.SV_HEALTH = Decimal(\"0.000000000000e+00\")\n dataSet.append(model)\n # INAV NAPA 1\n model = NavDataModel()\n model.PRN_ID = \"E08\"\n model.TIME_TAG = \"2016 05 01 00 05 00\"\n model.DATA_SOURCES = 513.0000000000\n model.TTOM = \"7.300000000000e+02\"\n model.TOE = \"6.300000000000e+02\"\n model.SISA_SIGNAL = \"2.550000000000e+02\"\n model.SV_HEALTH = Decimal(\"0.000000000000e+00\")\n model.GAL_WEEK = 1.795000000000e+03\n dataSet.append(model)\n # INAV time inconsitence 2\n model = NavDataModel()\n model.PRN_ID = \"E08\"\n model.DATA_SOURCES = 517.0000000000\n model.TTOM = \"7.300000000000e+02\"\n model.TOE = \"8.300000000000e+02\"\n model.SISA_SIGNAL = \"2.550000000000e+02\"\n model.SV_HEALTH = Decimal(\"0.000000000000e+00\")\n dataSet.append(model)\n # INAV SISA inconsitence 3\n model = NavDataModel()\n model.PRN_ID = \"E08\"\n model.DATA_SOURCES = 517.0000000000\n model.TTOM = \"7.300000000000e+02\"\n model.TOE = \"6.300000000000e+02\"\n model.SISA_SIGNAL = \"2.560000000000e+02\"\n model.SV_HEALTH = Decimal(\"0.000000000000e+00\")\n dataSet.append(model)\n # FNAV success 4\n model = NavDataModel()\n 
model.PRN_ID = \"E08\"\n model.TIME_TAG = \"2016 05 01 00 00 00\"\n model.DATA_SOURCES = 258.0000000000\n model.TTOM = \"7.300000000000e+02\"\n model.TOE = \"7.300000000000e+02\"\n model.SISA_SIGNAL = \"1.550000000000e+02\"\n model.SV_HEALTH = Decimal(\"0.000000000000e+00\")\n model.GAL_WEEK = 1.895000000000e+03\n dataSet.append(model)\n # FNAV NAPA 5\n model = NavDataModel()\n model.PRN_ID = \"E08\"\n model.TIME_TAG = \"2016 05 01 00 05 00\"\n model.DATA_SOURCES = 258.0000000000\n model.TTOM = \"8.300000000000e+02\"\n model.TOE = \"7.300000000000e+02\"\n model.SISA_SIGNAL = \"2.550000000000e+02\"\n model.SV_HEALTH = Decimal(\"0.000000000000e+00\")\n model.GAL_WEEK = 1.895000000000e+03\n dataSet.append(model)\n # FNAV time inconsitence\n model = NavDataModel()\n model.PRN_ID = \"E08\"\n model.DATA_SOURCES = 258.0000000000\n model.TTOM = \"7.300000000000e+02\"\n model.TOE = \"8.300000000000e+02\"\n model.SISA_SIGNAL = \"2.550000000000e+02\"\n model.SV_HEALTH = Decimal(\"0.000000000000e+00\")\n dataSet.append(model)\n # FNAV SISA inconsitence\n model = NavDataModel()\n model.PRN_ID = \"E08\"\n model.DATA_SOURCES = 258.0000000000\n model.TTOM = \"7.300000000000e+02\"\n model.TOE = \"6.300000000000e+02\"\n model.SISA_SIGNAL = \"2.560000000000e+02\"\n model.SV_HEALTH = Decimal(\"0.000000000000e+00\")\n dataSet.append(model)\n # NONE Dataset\n model = NavDataModel()\n model.PRN_ID = \"E08\"\n model.DATA_SOURCES = 516.0000000000\n model.TTOM = \"7.300000000000e+02\"\n model.TOE = \"6.300000000000e+02\"\n model.SISA_SIGNAL = \"2.500000000000e+02\"\n dataSet.append(model)\n return dataSet\n\n def createHealthTestData(self, inavHealth, fnavHealth):\n fnav = NavDataModel()\n fnav.PRN_ID = \"E08\"\n fnav.DATA_SOURCES = 258.0000000000\n fnav.TTOM = \"7.300000000000e+02\"\n fnav.TOE = \"6.300000000000e+02\"\n fnav.SISA_SIGNAL = \"1.550000000000e+02\"\n fnav.SV_HEALTH = fnavHealth\n inav = NavDataModel()\n inav.PRN_ID = \"E08\"\n inav.DATA_SOURCES = 517.0000000000\n inav.TTOM = \"7.300000000000e+02\"\n inav.TOE = \"6.300000000000e+02\"\n inav.SISA_SIGNAL = \"1.550000000000e+02\"\n inav.SV_HEALTH = inavHealth\n return inav, fnav\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5231920480728149, "alphanum_fraction": 0.5364089608192444, "avg_line_length": 40.77083206176758, "blob_id": "2e99a94e0413b9395e154fe02cfeb5c7d15666d6", "content_id": "2d8e168d06862431bde8a99999f984c5fb5b56e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4010, "license_type": "no_license", "max_line_length": 79, "num_lines": 96, "path": "/refephermisclockdecoder/refEphermisClockDecoder.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nfrom kpi.helper.common import Common\nfrom kpi.data.svRefPVTCoM import SvRefPVTCoM\n\n\nclass RefEphermisClockDecoder:\n \"\"\"\n Reference Ephermis and Clock Decoder\n reads Daily SP3C Files and decode to internal data structure.\n Module works with two blocks.\n sp3c-file parser and time reference consistency check\n \"\"\"\n\n log = Common.getLogger('RefEphermisClockDecoder')\n\n def decodeRefEphermisAndClock(self, timeTagsCal, sp3List, prnId):\n \"\"\"\n module decode ref ephermis and clock. 
Parse the sp3c files and\n check them for consistency\n timeTagsCal [string]: time Tags ommitted by configuration parser\n sp3list [file]: daily sp3c array\n prnId [string]: array of all satelites\n return [string]: Array with svRefPVTCoM values.\n \"\"\"\n result, timeTagsRef = self.parseSP3CFiles(sp3List, prnId)\n if self.timeRefConsistencyCheck(timeTagsCal, timeTagsRef) is False:\n self.log.error(\n \"TimeTags are different, please refresh configuration file\")\n raise Exception(\n \"TimeTags are different, please refresh configuration file\")\n else:\n return result\n\n def parseSP3CFiles(self, sp3List, prnId):\n \"\"\"\n parse the SP3C Files\n returns an array with all the record data per PNR\n and an array with them referenced TimeTags\n sp3list [file]: daily sp3c array\n prnId [string]: array of all satelites\n return [string], [string]: Array with svRefPVTCoM values and\n array with dateTag in Calendar Format.\n \"\"\"\n self.log.info(\"Starting sp3cparser...\")\n result = []\n timeTagsRef = []\n for sp3File in sp3List:\n sp3CData = open(sp3File)\n if sp3CData is None:\n self.log.error(\"SP3C File %s was not found.\", sp3File)\n for line in sp3CData:\n fields = line.split()\n # new records start with *\n if line.startswith(\"* \"):\n self.log.debug(\"found record\")\n timeTagsRef.append(line[3:31])\n elif line.startswith(\"P\"):\n if fields[0][1:] in prnId:\n self.log.debug(\n \"found postion records for PRN: %s\", fields[0][1:])\n model = SvRefPVTCoM(fields[0][1:], fields[1],\n fields[2], fields[3],\n None, None, None,\n fields[4])\n result.append(model)\n elif line.startswith(\"V\"):\n if fields[0][1:] in prnId:\n self.log.debug(\n \"found velocity record for PRN: %s\", fields[0][1:])\n model = SvRefPVTCoM(fields[0][1:], None, None, None,\n fields[1],\n fields[2], fields[3],\n fields[4])\n result.append(model)\n if len(result) < 1:\n self.log.error(\"No SP3C Files in %s\", sp3CData)\n sp3CData.close()\n self.log.info(\"Quit sp3cparser.\")\n return result, timeTagsRef\n\n def timeRefConsistencyCheck(self, timeTagsCal, timeTagsRef):\n \"\"\"\n checks the timetags comming from config file\n against the timeTags references from SP3CParser\n timeTagsCal [string]: time Tags ommitted by configuration parser\n timeTagsRef [string]: time Tags ommitted by sp3c parser\n return boolean.\n \"\"\"\n if set(timeTagsCal) == set(timeTagsRef):\n self.log.debug(\"Timetags are ok.\")\n return True\n else:\n self.log.error(\"Timetags are different.\")\n return False\n" }, { "alpha_fraction": 0.5633640289306641, "alphanum_fraction": 0.5748847723007202, "avg_line_length": 36.739131927490234, "blob_id": "f9e51eee0e78bf8ca02291e3fc6fa65fd57dc9a2", "content_id": "f560e57f065df5e4da22eed4be01db4be545b6f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 868, "license_type": "no_license", "max_line_length": 79, "num_lines": 23, "path": "/data/glonassSlotFreqNumbers.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass GlonassSlotFreqNumbers:\n \"\"\"\n Author: Bhaskar Shankar (IDV-AG)\n Date : 05 Sept 2016\n\n Assigned to Class variable of ObservationsDataHeader\n VAR - GLONASS_SLOT_FREQ_NUMBERS\n Parameters:\n NUMBER_OF_SATELLITES - Number of satellites in list\n LIST_OF_SATELLITES_FREQUENCIES - Satellite numbers (system code, slot)\n Frequency numbers (-7...+6)\n Use continuation lines for more than 8\n Satellites\n \"\"\"\n\n def __init__(self, numberOfFrequencies=\"\", 
satellitesFrequencies=\"\"):\n self.NUMBER_OF_SATELLITES = numberOfFrequencies\n # Class Ref: (List) SatellitesFrequencies\n self.LIST_OF_SATELLITES_FREQUENCIES = satellitesFrequencies\n" }, { "alpha_fraction": 0.6687306761741638, "alphanum_fraction": 0.673374593257904, "avg_line_length": 31.299999237060547, "blob_id": "80a7b55a85a8c1a6f30896348324aaf9690ad9dc", "content_id": "a63952289b47454e2c1011209335b9c549bc17c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 646, "license_type": "no_license", "max_line_length": 70, "num_lines": 20, "path": "/data/antennaDelta.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass AntennaDelta:\n \"\"\"\n Section: 5.6 : Observer Files Decoder Module\n Class varialbe at ObservationsDataHeader class for ANTENNA_DELTA.\n DOC DATA:\n Antenna height: Height of the antenna reference point (ARP) above\n the marker\n Horizontal eccentricity of ARP relative to the marker (east/north)\n All units in meters\n \"\"\"\n\n def __init__(self, antennaHeight=\"\", eccentricityEast=\"\",\n eccentricityNorth=\"\"):\n self.ANTENNA_HEIGHT = antennaHeight\n self.ECCENTRICITY_EAST = eccentricityEast\n self.ECCENTRICITY_NORTH = eccentricityNorth\n" }, { "alpha_fraction": 0.6379746794700623, "alphanum_fraction": 0.6392405033111572, "avg_line_length": 31.91666603088379, "blob_id": "25e0dd6511a00e301aac57344206b6906a9b5875", "content_id": "011a1178d74c40ebef8316f52261429be86945c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 792, "license_type": "no_license", "max_line_length": 74, "num_lines": 24, "path": "/data/svHealthModel.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass SvHealthModel:\n \"\"\"\n Position data in ECEF format, based on broadcast ephemeris data. It is\n expressed to SV’s Antenna Phase Center NAV in meters.\n PRN_ID string: satellite id\n EPOCH string: Epoch of calculation\n SIS_HS string: signal in space health status. combination of SHS, DVS\n and SISA_FLAG_STATUS\n SHS string: signal health status\n DVS string: data validity status\n SISA_FLAG_STATUS string: signal in space accuracy flag\n \"\"\"\n\n def __init__(self, prnId, epoch, sisHs, shs, dvs, sisaFlagStatus):\n self.PRN_ID = prnId\n self.EPOCH = epoch\n self.SIS_HS = sisHs\n self.SHS = shs\n self.DVS = dvs\n self.SISA_FLAG_STATUS = sisaFlagStatus\n" }, { "alpha_fraction": 0.6579954028129578, "alphanum_fraction": 0.6587605476379395, "avg_line_length": 38.60606002807617, "blob_id": "d1b8f50cf07c7172d91323b4b9cd18a792cfdad8", "content_id": "f49762168b523ad09f71c2059bb09150d8f16256", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1307, "license_type": "no_license", "max_line_length": 73, "num_lines": 33, "path": "/data/navHeaderModel.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass NavHeaderModel:\n \"\"\"\n nav header object. inherit all the header variables extracted\n from RINEX file.\n Parameter:\n FORMAT_VERSION string: RINEX format version\n FILE_TYPE string: file type e.g. 
\"N\": Navigation data\n SATELLITE_SYSTEM system string: satellite system \"E\": Galileo\n PROGRAMM_NAME string: name of program creating current file\n AGENCY_NAME string: name of agency creating current file\n TIME_TAG string: date and time of file creation\n TIME_ZONE string: code for timezone \"UTC\"\n COMMENT [string]: comment line(s)\n TIME_SYSTEM_CORRECTION [TimeSystemCorrectionModel]: list of the\n time system correction data\n \"\"\"\n\n def __init__(self, formatVersion=\"\", fileType=\"\", satelliteSystem=\"\",\n programmName=\"\", agencyName=\"\", timeTag=\"\", timeZone=\"\",\n comment=\"\", timeSystemCorrection=\"\"):\n self.FORMAT_VERSION = formatVersion\n self.FILE_TYPE = fileType\n self.SATELLITE_SYSTEM = satelliteSystem\n self.PROGRAMM_NAME = programmName\n self.AGENCY_NAME = agencyName\n self.TIME_TAG = timeTag\n self.TIME_ZONE = timeZone\n self.COMMENT = comment\n self.TIME_SYSTEM_CORRECTION = timeSystemCorrection\n" }, { "alpha_fraction": 0.6193805932998657, "alphanum_fraction": 0.6343656182289124, "avg_line_length": 34.75, "blob_id": "6d6c4ae8cf52373c115cf66df27b97c25a8d44f3", "content_id": "1a03db9cc01d6a46e322e2632390a25a4bf750db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1001, "license_type": "no_license", "max_line_length": 77, "num_lines": 28, "path": "/data/timeSystemCorrectionModel.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass TimeSystemCorrectionModel:\n \"\"\"\n time system correction object. inherit the timeSystem correction data for\n the RINEX Nav header.\n Parameter:\n CORRECTION_TYPE string: Time System Correction (TSC) type\n A_0 string = coefficient of 1-dep polynomial\n A_1 string = coefficient of 1-dep polynomial\n REF_TIME string: (TSC) reference time for polynomial\n REF_WEEK_NR string: (TSC) referenced week number\n TSC_S string: (TSC) EGNOS, WAAS or MSARS, derived from Service provider.\n default_ Snn with nn=PRN-100\n UTC_IDENTIFER string: 0 if unknown\n \"\"\"\n\n def __init__(self, correctionType=\"\", a0=\"\", a1=\"\", refTime=\"\",\n refWeekNr=\"\", tscS=\"\", utcIdentifer=\"\"):\n self.CORRECTION_TYPE = correctionType\n self.A_0 = a0\n self.A_1 = a1\n self.REF_TIME = refTime\n self.REF_WEEK_NR = refWeekNr\n self.TSC_S = tscS\n self.UTC_IDENTIFER = utcIdentifer\n" }, { "alpha_fraction": 0.5757575631141663, "alphanum_fraction": 0.5875421166419983, "avg_line_length": 43, "blob_id": "28ec7528a64f5abacd5d3c4198045e2c9f3b30d9", "content_id": "4a7530279a06ab29295e7967a90d78b731ee78e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1188, "license_type": "no_license", "max_line_length": 78, "num_lines": 27, "path": "/data/sysScaleFactor.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass SysScaleFactor:\n \"\"\"\n Assigned to Class variable of ObservationsDataHeader\n SYS_SCALE_FACTOR (SYS / SCALE FACTOR)\n Parameters:\n SATELLITE_SYSTEM - Satellite system (G/R/E/J/C/I/S)\n DIVIDING_FACTOR - Factor to divide stored observations with\n before use (1,10,100,1000)\n NUM_OBSERVATION_TYPES - Number of observation types involved. 
0 or\n blank: All observation types\n OBSERVATION_TYPE_LIST - List of observation types\n (Use continuation line(s) for more than 12\n observation types)\n Repeat record if different factors are applied to\n different observation types\n \"\"\"\n\n def __init__(self, satelliteSystem=\"\", dividingFactor=\"\",\n numObservationTypes=\"\", observationTypeList=\"\"):\n self.SATELLITE_SYSTEM = satelliteSystem\n self.DIVIDING_FACTOR = dividingFactor\n self.NUM_OBSERVATION_TYPES = numObservationTypes\n self.OBSERVATION_TYPE_LIST = observationTypeList\n" }, { "alpha_fraction": 0.5395578742027283, "alphanum_fraction": 0.5578824877738953, "avg_line_length": 32.378639221191406, "blob_id": "e0f9af4a51e5d264bec655dfe7007b2620437f75", "content_id": "4e5cc7f9ad324fae34e70bc3eb1d7f0bf5f6296c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3438, "license_type": "no_license", "max_line_length": 79, "num_lines": 103, "path": "/helper/common.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport logging\nimport logging.config\nimport configparser\nimport datetime\n# TODO: Generische Klasse: bitte keinen Zugriff auf fachliche Module!\nfrom kpi.data.timeTagGpsModel import TimeTagGpsModel\n\n\nclass Common:\n \"static initialization of logging\"\n logging.config.fileConfig(\"resources/logging.conf\")\n\n def getLogger(name):\n \"\"\"\n uses python-logging and logs to the given file\n name : logger name (best practise: __name__)\n \"\"\"\n return logging.getLogger(name)\n\n def setUpLoggingFile(fileName):\n \"\"\"\n sets the logfile name to the logging.conf properties\n fileName String: name for the logfile\n \"\"\"\n config = configparser.ConfigParser()\n config.read(r\"resources/logging.conf\")\n config.set(\"handler_file_handler\", \"args\", \"('\" + fileName + \"', 'w')\")\n with open(r\"resources/logging.conf\", \"w\") as config_file:\n config.write(config_file)\n\n def printProgress(i, max, comment):\n \"\"\"\n prints a simple terminal progress bar\n i : current state\n max : max value for i\n comment: optional text at the end\n \"\"\"\n p = 1 + int(100 * (i / float(max)))\n a = '[' + int(p / 10) * '='\n b = ' ' * (10 - int(p / 10))\n c = ']' + str(p) + '%'\n print(a + b + c, comment, end='\\r')\n\n def printSystemInfo():\n \"\"\"\n prints a short system info to console\n \"\"\"\n print(\"============ System ============\")\n print(sys.version)\n\n def getDatetimeFromGregorianDate(gregorianDate):\n \"\"\"\n date is gregorian form \"2016 12 01 09 42 11\" yyyy,MM,dd,h.m,s\n gregorianDate String: input gregorianDate\n return python datetime formatted date\n \"\"\"\n date = gregorianDate.split(\" \")\n return datetime.datetime(int(date[0]), int(date[1]), int(date[2]),\n int(date[3]), int(date[4]), int(date[5]))\n\n def getDayOfWeek(day):\n \"\"\"\n get day of the week. Sun:0, Mon:1, ... 
Sat: 6\n day datetime: day in datetimeformat\n return day of week\n \"\"\"\n dow = int(day.weekday()) + 1\n if dow == 7:\n dow = 0\n return dow\n\n def convertUtfToGpsFormat(date):\n \"\"\"\n converts UTF Time to GPS Time.\n date datetime: date in UTF Format\n returns tuple(gpsWeekDay, gpsSeconds)\n \"\"\"\n seconds = (date -\n datetime.datetime(date.year,\n date.month, date.day)).total_seconds()\n dow = Common.getDayOfWeek(date)\n week = (date - datetime.datetime(1980, 1, 6, 0, 0, 0)) / 7\n gpsseconds = dow * 86400 + seconds\n return TimeTagGpsModel(week.days, gpsseconds)\n\n def convertDecimalToBinary(decValue, digits):\n \"\"\"\n convert a decimal value to a binary value with n digits.\n e.g.: decValue: 5 and digits: 6 -> 000101\n parameter:\n decValue Decimal: decimal value to convert\n digits int: count of digits the binaryvalue should have\n returns string: binValue with n digits\n \"\"\"\n binValue = str(bin(int(decValue)))[2:]\n if len(binValue) < digits:\n for x in range(0, digits - len(binValue)):\n binValue = \"0\" + binValue\n return binValue\n" }, { "alpha_fraction": 0.5939139723777771, "alphanum_fraction": 0.601259171962738, "avg_line_length": 35.653846740722656, "blob_id": "83788b6af5c9c51904e003433f771835698411b5", "content_id": "efcd237bb5a4db00ee6b5e65097af9c262aaa919", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 953, "license_type": "no_license", "max_line_length": 72, "num_lines": 26, "path": "/data/sysCorrectionApplied.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass SysCorrectionApplied:\n \"\"\"\n Author: Bhaskar Shankar (IDV-AG)\n Date : 05 Sept 2016\n\n Assigned to Class variable of ObservationsDataHeader\n SYS_DCBS_APPLIED (SYS / DCBS APPLIED)\n SYS_PCVS_APPLIED (SYS / PCVS APPLIED)\n Parameters:\n SATELLITE_SYSTEM - Satellite system (G/R/E/J/C/I/S)\n CORRECTION_PROGRAM - Program name used to apply\n differential code bias corrections or\n phase center variation corrections\n SOURCE_OF_CORRECTION - Source of corrections (URL)\n Repeat for each satellite system.\n \"\"\"\n\n def __init__(self, satelliteSystem=\"\", correctionProgram=\"\",\n sourceOfCorrection=\"\"):\n self.SATELLITE_SYSTEM = satelliteSystem\n self.CORRECTION_PROGRAM = correctionProgram\n self.SOURCE_OF_CORRECTION = sourceOfCorrection\n" }, { "alpha_fraction": 0.5868673324584961, "alphanum_fraction": 0.603283166885376, "avg_line_length": 28.239999771118164, "blob_id": "41b91011e978e06558a223a4ed993ea861bac2b7", "content_id": "59502e8e0c656eb0ff3dad5d748e0a6494426471", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 731, "license_type": "no_license", "max_line_length": 65, "num_lines": 25, "path": "/data/satellitesFrequencies.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass SatellitesFrequencies:\n\n \"\"\"\n Author: Bhaskar Shankar (IDV-AG)\n Date : 05 Sept 2016\n\n Class variable for GlonassSlotFreqNumbers\n Var - LIST_OF_SATELLITES_FREQUENCIES\n\n List of :\n Satellite numbers (system code, slot)\n Frequency numbers (-7...+6)\n Use continuation lines for more than 8 Satellites\n Parameters:\n SATELLITE_NUMBER - Satellite numbers (system code, slot)\n FREQUENCY_NUMBER - Frequency numbers (-7...+6)\n \"\"\"\n\n def __init__(self, satelliteNumber=\"\", frequencyNumber=\"\"):\n self.SATELLITE_NUMBER = satelliteNumber\n 
self.FREQUENCY_NUMBER = frequencyNumber\n" }, { "alpha_fraction": 0.5286614298820496, "alphanum_fraction": 0.5478715300559998, "avg_line_length": 41.25324630737305, "blob_id": "41a004fce95bc56bdfccd3dc0265213590db692e", "content_id": "b5dfe1fef1b25a6f388b13444a35679d643ea9c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6507, "license_type": "no_license", "max_line_length": 79, "num_lines": 154, "path": "/differentialCodeBiasFilesDecoder/dcbParser.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nfrom kpi.helper.common import Common\nfrom kpi.data.dcbData import DcbData\nfrom kpi.data.dcbE1E5 import DcbE1E5\n\n\nclass DCBParser:\n \"\"\"\n Reads DCB values from files in DCB_LIST and return\n the DCB per PNRID per EPOCH.\n It has 2 parts - DCB_Data_Extractor and DCB_Parser\n \"\"\"\n\n log = Common.getLogger('DCBParser')\n # Frequency according to Priority - E1-E5a and E1-E5b\n e1E5aFreqPrioList = ['C1X-C5X', 'C1Q-C5Q',\n 'C1Q-C5X', 'C1X-C5Q']\n\n e1E5bFreqPrioList = ['C1X-C7X', 'C1Q-C7Q',\n 'C1Q-C7X', 'C1X-C7Q']\n\n def dcbDataExtractor(self, dcbList, timeTagCal, prnIdList):\n \"\"\"\n This Block adapts the per satellite daily DCB\n information to each epoch contained\n in timeTagCal\n Parameters:\n dcbList = List of DCB Files\n timeTagCal = TimeTagCals List\n prnIdList = List of PNR IDs\n Return:\n DCB_DATA = Alle DCB Data in per Epoch per PNR Basis\n \"\"\"\n self.log.info(\"Start DCB Decoder\")\n\n dcb_data = []\n for timeTagCalEpoch in timeTagCal:\n for pnrId in prnIdList:\n \"\"\"\n Check condition from read_DCB.m\n 'E' - Only Galilian satellites should be checked\n \"\"\"\n if(pnrId.startswith(\"E\") and pnrId[1] != \" \"):\n # Get all the DCB Values pro epoch pro pnrid\n dcbDailyPerSatEpoch, timeTagCalEpoch = self.dcbParser(\n timeTagCalEpoch, dcbList, pnrId)\n filterData = self.filterForEachFrequency(\n timeTagCalEpoch, dcbDailyPerSatEpoch, pnrId)\n dcb_data.append(filterData)\n self.log.info(\"End DCB Decoder\")\n return dcb_data\n\n def dcbParser(self, timeTagCalEpoch, dcbFileList, prnId):\n \"\"\"\n Returns the daily DCB value for each available frequency\n combination for all the files contained in DCB_daily list\n for given pnrId.\n Parameters:\n timeTagCalEpoch = (String) single Epoch\n dcbFileList = (List) DCB File List\n pnrid = (String) PNR ID\n Return:\n dcbDailyPerSatEpoch = Alle DCB Data in per Epoch per PNR Basis\n timeTagCalEpoch = timeTagEpoch\n \"\"\"\n dcbDailyPerSatEpoch = []\n for dcbFile in dcbFileList:\n # Compare with Date and not include time\n if dcbFile[1][0:10] == timeTagCalEpoch[0:10]:\n dcbFile = dcbFile[0]\n dcbDailyData = open(dcbFile)\n if dcbDailyData is None:\n self.log.error(\"DCB File %s was not found.\", dcbFile)\n else:\n for line in dcbDailyData:\n fields = line.split()\n if line.startswith(\" DCB \") and prnId == fields[2]:\n singleDcb = DcbE1E5(fields[2], fields[5],\n fields[6], fields[3] +\n \"-\" + fields[4],\n # DCB from nanoseconds to\n # seconds\n float(fields[8]) / (10**9))\n dcbDailyPerSatEpoch.append(singleDcb)\n dcbDailyData.close()\n return dcbDailyPerSatEpoch, timeTagCalEpoch\n\n def filterForEachFrequency(self, timeTagCalEpoch, dcbDailyPerSatEpoch,\n pnrId):\n \"\"\"\n From all available DCB values from given PNR in dcbdaily,\n the algorithm should select one single value for each\n DCB_E1_E5a and DCB_E1E5b\n Parameters:\n timeTagCalEpoch = (String) single Epoch\n dcbDailyPerSatEpoch = (List) 
Daily DCB per Epoch per Satellite\n Return:\n dcbDailyPerSatResult = (String) return single\n Data per epoch satellite\n \"\"\"\n sortDailyDcbE1E5a = []\n sortDailyDcbE1E5b = []\n dcbValFreq = DcbE1E5()\n # Loop over all DCBs and filter for different frequencies\n for dcbValFreq in dcbDailyPerSatEpoch:\n # Filter for Frequency E1-E5a\n if ('C1X-C5' in dcbValFreq.FRQUENCY or\n 'C1Q-C5' in dcbValFreq.FRQUENCY):\n # tempE1E5a = [dcbValFreq[3], dcbValFreq]\n sortDailyDcbE1E5a.append(dcbValFreq)\n # Filter for Frequency E1-E5b\n elif ('C1X-C7' in dcbValFreq.FRQUENCY or\n 'C1Q-C7' in dcbValFreq.FRQUENCY):\n # tempE1E5b = [dcbValFreq[3], dcbValFreq]\n sortDailyDcbE1E5b.append(dcbValFreq)\n\n if len(sortDailyDcbE1E5a) == 0 or len(sortDailyDcbE1E5b) == 0:\n self.log.error(\"No frequency for given PNRID %s\",\n pnrId)\n raise Exception(\n \"No frequencies (E1-E5a or E1-E5b) are available for PRNID: \" +\n dcbValFreq.PNR_ID + \" and TimeTagCalEpoch: \" +\n timeTagCalEpoch)\n\n dcbDailyPerSatE1E5a = DcbE1E5()\n dcbDailyPerSatE1E5b = DcbE1E5()\n dcbDailyPerSatE1E5a = self.filterDCBFromFreqPrioList(\n sortDailyDcbE1E5a, self.e1E5aFreqPrioList)\n dcbDailyPerSatE1E5b = self.filterDCBFromFreqPrioList(\n sortDailyDcbE1E5b, self.e1E5bFreqPrioList)\n # Retrun DCB Value\n dcbDailyPerSatResult = DcbData(timeTagCalEpoch,\n dcbDailyPerSatE1E5a,\n dcbDailyPerSatE1E5b)\n return dcbDailyPerSatResult\n\n def filterDCBFromFreqPrioList(self, sortDailyDcbE1EXa, e1eXaFreqPrioList):\n \"\"\" Fetch the DCB value with highest\n priority according to frequency list\n Parameters:\n sortDailyDcbE1EXa = (List) Daily DCB to be filtered\n e1eXaFreqPrioList = (List) Static List\n Return:\n dcbFreq = (String) filtered single DCB Data\n \"\"\"\n # Filter according to priority list\n dcbFreq = DcbE1E5()\n for freqStr in e1eXaFreqPrioList:\n # Loop over all the DailyDCB list pro Epoch pro Satellite\n for dcbFreq in sortDailyDcbE1EXa:\n if freqStr == dcbFreq.FRQUENCY:\n return dcbFreq\n" }, { "alpha_fraction": 0.498157262802124, "alphanum_fraction": 0.5038610100746155, "avg_line_length": 45.32520294189453, "blob_id": "9e1fd6cc3cce55f090d8f582a8041a414060e07a", "content_id": "487754cc9121299c50e3bba85eea53ed5700a334", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11396, "license_type": "no_license", "max_line_length": 79, "num_lines": 246, "path": "/antennaOffsetFilesDecoderModule/antennaOffsetParser.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nfrom datetime import datetime\nfrom kpi.helper.common import Common\nfrom kpi.data.antennaPhaseOffset import AntennaPhaseOffset\nfrom kpi.data.antennaFrequency import AntennaFrequency\nfrom kpi.data.antennaNorthEastUp import AntennaNorthEastUp\nfrom kpi.data.antennaOffsetDecoderModel import AntennaOffsetDecoderModel\n\n\nclass AntennaOffsetParser:\n \"\"\"\n This module parses the input file containing Antenna Phase Center\n offset for both navigation data and precise orbit and generates\n a variable that contains per satellite per epoch antenna phase center\n offset for both navigation data and precise orbits\n \"\"\"\n\n log = Common.getLogger(\"AntennaOffsetParser\")\n\n def apcDataExtractor(self, apc_nav, apc_pro, time_tags_cals, prn_id):\n \"\"\"\n Adapts the APC offset information to each one of epoch\n contained in time_tag_cals\n Paramters:\n apc_nav - Input from antennaOffsetParser function and contains\n APC offset Navigation data extracted from 
text file\n for PRNID and Date in calender format and offset in\n meters\n apc_pro - Input from antennaOffsetParser function and contains\n APC offset precise orbits data extracted from text file\n for PRNID and Date in calender format and offset in\n meters\n time_tags_cals - List of TimeTagCals in Gregorian Calender Format\n prn_id - Satellite PRN to be included in the computation\n return:\n apc_Off_Bce - Contains Galileo offset for broadcast navigation\n per satellite per epoch in meters\n apc_Off_Ref - Contains Galileo offset for precise orbit data\n per satellite per epoch in meters\n \"\"\"\n\n self.log.info(\"Start Antenna Phase Center Data Extractor\")\n apc_Off_Ref = []\n apc_Off_Bce = []\n for timeTagCalEpoch in time_tags_cals:\n for prnId in prn_id:\n\n apcOffBceData = self.getAPCOffsetData(\n apc_nav, prnId, timeTagCalEpoch)\n apcOffRefData = self.getAPCOffsetData(\n apc_pro, prnId, timeTagCalEpoch)\n apc_Off_Ref.append(apcOffRefData)\n apc_Off_Bce.append(apcOffBceData)\n\n return apc_Off_Bce, apc_Off_Ref\n\n def antennaOffsetParser(self, acpOffRefFile, apcOffBceFile):\n \"\"\"\n This block parses the data contained in corresponding Antex file\n and store them in an output cariable sorted by PRN and contains\n APC offset data extracted from text file for PRNID and Date in\n calender format and offset in meters\n This has to execute 2 times - once for Navigation data APC Offset\n once for Precise Orbit data APC Offset\n Paramters:\n acpOffRefFile - Text file containing APC Offset data for\n broadcast navigation data\n apcOffBceFile - Text file containing APC Offset data for\n precise reference orbit\n Return:\n apc_nav - Contains APC offset Navigation data extracted from text\n file for PRNID and Date in calender format and offset in\n meters\n apc_pro - Input from antennaOffsetParser function and contains\n APC offset precise orbits data extracted from text file\n for PRNID and Date in calender format and offset in\n meters\n \"\"\"\n apc_nav = self.getApcOffsetData(acpOffRefFile)\n apc_pro = self.getApcOffsetData(apcOffBceFile)\n\n return apc_nav, apc_pro\n\n def getApcOffsetData(self, apcFile):\n \"\"\"\n Read every block of data, starting from \"START OF ANTENNA\" to\n \"END OF ANTENNA\" and read all the required data into AntennaPhaseOffset\n object.\n Parameter:\n apcFile : Input file Data - Navigation or Precise Orbit data\n Return:\n app_data : List of parsed AntennaPhaseOffset data\n\n Source : Implementation of \"antexFileDecoder.m\"\n \"\"\"\n app_data = []\n apcFileData = open(apcFile)\n now = datetime.now()\n if apcFileData is None:\n self.log.error(\"The APC File %s doesnt exist\", apcFile)\n raise Exception(\n \"File not found at \" + apcFile)\n else:\n for line in apcFileData:\n if \"START OF ANTENNA\" in line:\n antennaOffset = AntennaPhaseOffset()\n\n # Goto line after \"START OF ANTENNA\" line\n line = next(apcFileData)\n\n # 'TYPE / SERIAL NO '\n if \"TYPE / SERIAL NO\" in line:\n antennaOffset.PVC_TYPE = line[0:20]\n antennaOffset.SERIAL_NUMBER = line[20:23]\n else:\n apcFileData.close()\n raise Exception(\n \"TYPE / SERIAL NO - tag does not exists in :\" +\n apcFile)\n # 'METH / BY / # / DATE'\n line = next(apcFileData)\n\n # 'DAZI'\n line = next(apcFileData)\n\n # 'ZEN1 / ZEN2 / DZEN '\n line = next(apcFileData)\n\n # Number OF FREQUENCIES\n line = next(apcFileData)\n if \"# OF FREQUENCIES\" not in line:\n self.log.error(\n \"# of frequencies - tag does not exists in :\" +\n apcFile)\n apcFileData.close()\n raise Exception(\n \"# of frequencies - tag does 
not exists in :\" +\n apcFile)\n antennaOffset.NUMBER_OF_FREQEUNCIES = line[1: 6]\n if line[0:4] == \"BLOCK\":\n antennaOffset.NUMBER_OF_FREQEUNCIES = 2\n\n # 'VALID FROM'\n line = next(apcFileData)\n if \"VALID FROM\" in line:\n fromStr = line[0:44]\n fromFormat = fromStr.split()\n if len(fromFormat[0]) < 4 or len(fromFormat) != 6:\n apcFileData.close()\n raise Exception(\n \"Invalid VALID_FROM Date Format \" + apcFile)\n validFromFormat = '{:%Y %m %d %H %M %S}'.format(\n datetime(int(fromFormat[0]), int(fromFormat[1]),\n int(fromFormat[2]), int(fromFormat[3]),\n int(fromFormat[4]),\n int(fromFormat[4][0:1])))\n antennaOffset.VALID_FROM = str(validFromFormat)\n else:\n antennaOffset.VALID_FROM = \"\"\n if \"VALID UNTIL\" in line:\n untilStr = line[0:44]\n untilFormat = untilStr.split()\n if len(untilFormat) != 6:\n apcFileData.close()\n raise Exception(\n \"Invalid VALID_UNTIL Date Format \" + apcFile)\n validUntillFormat = '{:%Y %m %d %H %M %S}'.format(\n datetime(int(untilFormat[0]), int(untilFormat[1]),\n int(untilFormat[2]), int(untilFormat[3]),\n int(untilFormat[4]),\n int(untilFormat[4][0:1])))\n antennaOffset.VALID_UNTIL = validUntillFormat\n\n # Goto 'SINEX CODE'\n line = next(apcFileData)\n else:\n # Get Sysdate as latest date\n antennaOffset.VALID_UNTIL = now.strftime(\n \"%Y %m %d %H %M %S\")\n # 'COMMENT'\n line = next(apcFileData)\n while \"COMMENT\" in line:\n line = next(apcFileData)\n freqCnt = 0\n frequencyListData = []\n while freqCnt < int(antennaOffset.NUMBER_OF_FREQEUNCIES):\n if freqCnt > 0:\n line = next(apcFileData)\n\n freqCnt = freqCnt + 1\n if \"START OF FREQUENCY\" in line:\n freqData = AntennaFrequency()\n # 'START OF FREQUENCY'\n freqData.NAME = line[3:6]\n # 'NORTH / EAST / UP'\n # Convert millimeters to meters\n line = next(apcFileData)\n individualData = line.split()\n antennaOffsetPosition = AntennaNorthEastUp(\n float(individualData[0]) * (0.001),\n float(individualData[1]) * (0.001),\n float(individualData[2]) * (0.001))\n freqData.NEU = antennaOffsetPosition\n # 'NOAZI'\n line = next(apcFileData)\n while \"NOAZI\" in line:\n line = next(apcFileData)\n\n while \"END OF FREQUENCY\" not in line:\n # Loop till end of antenna block\n line = next(apcFileData)\n frequencyListData.append(freqData)\n antennaOffset.FREQUENCY_LIST = frequencyListData\n app_data.append(antennaOffset)\n apcFileData.close()\n return app_data\n\n def getAPCOffsetData(self, apc_list, prnId, timeTagCalEpoch):\n \"\"\"\n Get APC Offsetdata per epoch per satellite from Parsed list apc_file\n Parameters:\n apc_file : APC list input - Navigation or Precise Orbit data\n prnId : Satellite PRN\n timeTagCalEpoch : time tag epoch in gregorian calender format\n \"\"\"\n epochDate = Common.getDatetimeFromGregorianDate(\n timeTagCalEpoch)\n\n apc_off_data = AntennaOffsetDecoderModel()\n apc_off_data.PRNID = prnId\n apc_off_data.EPOCH = timeTagCalEpoch\n # Loop over apc_nav or for each line\n for apcData in apc_list:\n if (int(len(apcData.VALID_FROM)) > 1 and\n apcData.SERIAL_NUMBER == prnId):\n validFromApc = Common.getDatetimeFromGregorianDate(\n apcData.VALID_FROM)\n validUntilApc = Common.getDatetimeFromGregorianDate(\n apcData.VALID_UNTIL)\n if (validFromApc < epochDate < validUntilApc):\n if int(apcData.NUMBER_OF_FREQEUNCIES) > 0:\n frequencyDataList = apcData.FREQUENCY_LIST\n apc_off_data.OFFSETDATA = frequencyDataList[0].NEU\n return apc_off_data\n" }, { "alpha_fraction": 0.4536060690879822, "alphanum_fraction": 0.6166174411773682, "avg_line_length": 38.35269546508789, "blob_id": 
"42d0160160a4bafd413b0f1abb24b926041b5174", "content_id": "0b5cce4bf603744e12a9e6e2f247ecbfd1ef6303", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9484, "license_type": "no_license", "max_line_length": 110, "num_lines": 241, "path": "/test/broadcastNavigationMessageDecoderTest.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "import unittest\nfrom nose.tools import *\nfrom decimal import *\nfrom kpi.broadcastnavigationdecoder.broadcastNavigationMessageDecoder import BroadcastNavigationMessageDecoder\nfrom kpi.broadcastnavigationdecoder.broadcastNavigationMessageDecoder import Dataset\nfrom kpi.data.navHeaderModel import NavHeaderModel\nfrom kpi.data.navDataModel import NavDataModel\nfrom kpi.data.timeSystemCorrectionModel import TimeSystemCorrectionModel\n\n\nclass TestBroadcastNavigationMessageDecoder(unittest.TestCase):\n \"\"\"\n Notice! All these test have to recheck when calculation data of dlr is\n received. So all these tests are pseudo tests. We are not right if this\n calculations are right\n \"\"\"\n\n def setUp(self):\n self.classToTest = BroadcastNavigationMessageDecoder()\n self.bceList = [\n \"test/resources/broadcastnavigationdecoder/testbrdm1220.16p\"]\n self.timeTagGpsList = []\n for x in range(1, 24):\n self.timeTagGpsList.append((1895, 3600 * x))\n self.prnId = [\"E08\", \"E11\"]\n self.service = \"E1,E5a,E5b\"\n self.aodMax = 3\n\n def testPseudoBroadcastNavigationMessageDecoder(self):\n timeTransfer, SV_HEALTH, SV_NAV_POS_VEL = self.\\\n classToTest.broadcastNavigationMessageDecoder(\n self.bceList, self.service, self.prnId, self.timeTagGpsList,\n self.aodMax)\n assert len(SV_HEALTH) == 23\n assert len(SV_NAV_POS_VEL) == 23\n assert len(timeTransfer) == 5\n assert len(SV_NAV_POS_VEL[0][1]) == 2\n assert len(SV_NAV_POS_VEL[0][1][0][1]) == 3\n\n def testTimeTransferEvaluation(self):\n headerModelDataSet = self.getHeaderModelData()\n timeTransferData = self.classToTest.timeTransferEvaluation(\n headerModelDataSet)\n assert timeTransferData[0].CORRECTION_TYPE == \"GAUT\"\n assert timeTransferData[1].CORRECTION_TYPE == \"GLPG\"\n assert timeTransferData[0].A_0 == '-1.8626451492e-09'\n assert timeTransferData[0].A_1 == '2.664535259e-15'\n assert timeTransferData[1].A_0 == '-1.8626451492e-08'\n assert timeTransferData[1].A_1 == '0.000000000e+00'\n\n def testNAVDataCleanup(self):\n NAV_datasets = self.getTestDatasets()\n INAV_datasets, FNAV_datasets = self.classToTest.navDataCleaning(\n NAV_datasets)\n assert len(INAV_datasets[0][1]) == 2\n assert len(FNAV_datasets[0][1]) == 2\n\n # test the helper methods for this class\n def testGetDataset(self):\n dataset = self.classToTest.getDataset(\n 517)\n assert dataset == Dataset.INAV\n dataset = self.classToTest.getDataset(\n 513)\n assert dataset == Dataset.INAV\n dataset = self.classToTest.getDataset(\n 589)\n assert dataset == Dataset.INAV\n dataset = self.classToTest.getDataset(\n 258)\n assert dataset == Dataset.FNAV\n dataset = self.classToTest.getDataset(\n 516)\n assert dataset == Dataset.NONE\n\n def testIsTimeConsistence(self):\n assert self.classToTest.isTimeConsistence(\n \"7.300000000000e+02\", \"0.000000000000e+00\") is True\n\n assert self.classToTest.isTimeConsistence(\n \"7.300000000000e+02\", \"8.000000000000e+02\") is False\n\n assert self.classToTest.isTimeConsistence(\n \"7.300000000000e+02\", \"7.300000000000e+02\") is True\n\n def testIsSISAConsistence(self):\n assert self.classToTest.isSISAConsistence(\n \"0.000000000000e+00\") is 
True\n\n assert self.classToTest.isSISAConsistence(\n \"2.560000000000e+02\") is False\n\n assert self.classToTest.isSISAConsistence(\n \"2.550000000000e+02\") is True\n\n def getHeaderModelData(self):\n tscModel = TimeSystemCorrectionModel(\"GAUT\", \"-1.8626451492e-09\",\n \"2.664535259e-15\",\n \"518400\", \"1894\")\n tscSet = [tscModel]\n tscModel = TimeSystemCorrectionModel(\"GLPG\", \"-1.8626451492e-08\",\n \"0.000000000e+00\",\n \"225280\", \"1895\")\n tscSet.append(tscModel)\n navHeaderModel = NavHeaderModel()\n navHeaderModel.TIME_SYSTEM_CORRECTION = tscSet\n NAV_headersets = [navHeaderModel]\n return NAV_headersets\n\n def getTestDatasets(self):\n dataSet = []\n # INAV success 0\n model = NavDataModel()\n model.PRN_ID = \"E08\"\n model.TIME_TAG = \"2016 05 01 00 00 00\"\n model.SV_CLOCK_BIAS = 1.810591667891e-03\n model.SV_CLOCK_DRIFT = 3.988134267274e-10\n model.SV_CLOCK_DRIFT_RATE = 0.000000000000e+00\n model.IOD_NAV = 0.000000000000e+00\n model.CRS = 6.662500000000e+01\n model.DELTA_N = 3.311566511466e-09\n model.M_0 = -1.122746861816e+00\n model.CUC = 3.136694431305e-06\n model.ECCENTRICITY = 2.200541784987e-04\n model.CUS = 7.826834917068e-06\n model.TOE = 0.000000000000e+00\n model.SQRT_A = 5.440608263016e+03\n model.CIC = 6.891787052155e-08\n model.OMEGA_0 = -2.817124155957e-01\n model.CIS = 8.195638656616e-08\n model.I_0 = 9.594287808826e-01\n model.CRC = 1.721562500000e+02\n model.OMEGA = -1.672201682621e+00\n model.OMEGA_DOT = -5.717738166745e-09\n model.IDOT = 2.542963067476e-10\n model.DATA_SOURCES = 513.0000000000\n model.GAL_WEEK = 1.895000000000e+03\n model.SISA_SIGNAL = 3.120000000000e+00\n model.BGD_E5_A = -7.450580596924e-09\n model.BGD_E5_B = -8.614733815193e-09\n model.TTOM = 6.850000000000e+02\n model.SV_HEALTH = Decimal(\"0.000000000000e+00\")\n dataSet.append(model)\n # INAV NAPA 1\n model = NavDataModel()\n model.PRN_ID = \"E08\"\n model.TIME_TAG = \"2016 05 01 00 05 00\"\n model.DATA_SOURCES = 513.0000000000\n model.TTOM = \"7.300000000000e+02\"\n model.TOE = \"6.300000000000e+02\"\n model.SISA_SIGNAL = \"2.550000000000e+02\"\n model.SV_HEALTH = Decimal(\"0.000000000000e+00\")\n model.GAL_WEEK = 1.795000000000e+03\n dataSet.append(model)\n # INAV time inconsitence 2\n model = NavDataModel()\n model.PRN_ID = \"E08\"\n model.DATA_SOURCES = 517.0000000000\n model.TTOM = \"7.300000000000e+02\"\n model.TOE = \"8.300000000000e+02\"\n model.SISA_SIGNAL = \"2.550000000000e+02\"\n model.SV_HEALTH = Decimal(\"0.000000000000e+00\")\n dataSet.append(model)\n # INAV SISA inconsitence 3\n model = NavDataModel()\n model.PRN_ID = \"E08\"\n model.DATA_SOURCES = 517.0000000000\n model.TTOM = \"7.300000000000e+02\"\n model.TOE = \"6.300000000000e+02\"\n model.SISA_SIGNAL = \"2.560000000000e+02\"\n model.SV_HEALTH = Decimal(\"0.000000000000e+00\")\n dataSet.append(model)\n # FNAV success 4\n model = NavDataModel()\n model.PRN_ID = \"E08\"\n model.TIME_TAG = \"2016 05 01 00 00 00\"\n model.DATA_SOURCES = 258.0000000000\n model.TTOM = \"7.300000000000e+02\"\n model.TOE = \"7.300000000000e+02\"\n model.SISA_SIGNAL = \"1.550000000000e+02\"\n model.SV_HEALTH = Decimal(\"0.000000000000e+00\")\n model.GAL_WEEK = 1.895000000000e+03\n dataSet.append(model)\n # FNAV NAPA 5\n model = NavDataModel()\n model.PRN_ID = \"E08\"\n model.TIME_TAG = \"2016 05 01 00 05 00\"\n model.DATA_SOURCES = 258.0000000000\n model.TTOM = \"8.300000000000e+02\"\n model.TOE = \"7.300000000000e+02\"\n model.SISA_SIGNAL = \"2.550000000000e+02\"\n model.SV_HEALTH = Decimal(\"0.000000000000e+00\")\n 
model.GAL_WEEK = 1.895000000000e+03\n dataSet.append(model)\n # FNAV time inconsitence\n model = NavDataModel()\n model.PRN_ID = \"E08\"\n model.DATA_SOURCES = 258.0000000000\n model.TTOM = \"7.300000000000e+02\"\n model.TOE = \"8.300000000000e+02\"\n model.SISA_SIGNAL = \"2.550000000000e+02\"\n model.SV_HEALTH = Decimal(\"0.000000000000e+00\")\n dataSet.append(model)\n # FNAV SISA inconsitence\n model = NavDataModel()\n model.PRN_ID = \"E08\"\n model.DATA_SOURCES = 258.0000000000\n model.TTOM = \"7.300000000000e+02\"\n model.TOE = \"6.300000000000e+02\"\n model.SISA_SIGNAL = \"2.560000000000e+02\"\n model.SV_HEALTH = Decimal(\"0.000000000000e+00\")\n dataSet.append(model)\n # NONE Dataset\n model = NavDataModel()\n model.PRN_ID = \"E08\"\n model.DATA_SOURCES = 516.0000000000\n model.TTOM = \"7.300000000000e+02\"\n model.TOE = \"6.300000000000e+02\"\n model.SISA_SIGNAL = \"2.500000000000e+02\"\n dataSet.append(model)\n return dataSet\n\n def createHealthTestData(self, inavHealth, fnavHealth):\n fnav = NavDataModel()\n fnav.PRN_ID = \"E08\"\n fnav.DATA_SOURCES = 258.0000000000\n fnav.TTOM = \"7.300000000000e+02\"\n fnav.TOE = \"6.300000000000e+02\"\n fnav.SISA_SIGNAL = \"1.550000000000e+02\"\n fnav.SV_HEALTH = fnavHealth\n inav = NavDataModel()\n inav.PRN_ID = \"E08\"\n inav.DATA_SOURCES = 517.0000000000\n inav.TTOM = \"7.300000000000e+02\"\n inav.TOE = \"6.300000000000e+02\"\n inav.SISA_SIGNAL = \"1.550000000000e+02\"\n inav.SV_HEALTH = inavHealth\n return inav, fnav\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.4349497854709625, "alphanum_fraction": 0.5227515697479248, "avg_line_length": 43.160377502441406, "blob_id": "ae72d90e4d16b6a752de0a5137c1d9bc5dbed8af", "content_id": "b6aa50ee977510cca9a0104205c5631ebac994fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4681, "license_type": "no_license", "max_line_length": 79, "num_lines": 106, "path": "/test/dcbParserTest.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport unittest\nimport sys\nfrom nose.tools import *\nfrom kpi.data.dcbE1E5 import DcbE1E5\nfrom kpi.differentialCodeBiasFilesDecoder.dcbParser import DCBParser\n\n\nclass testDCBParser(unittest.TestCase):\n\n def setUp(self):\n \"\"\"\n setup call, runs before each test\n \"\"\"\n self.configFile = \"test/resources/Configuration.cfg\"\n self.classToTest = DCBParser()\n self.prnIdListError = [\"E99\"]\n self.prnIdList = [\"E08\", \"E11\", \"E12\",\n \"E19\", \"E22\", \"E24\", \"E09\", \"E26\"]\n self.resourcePath = \"test/resources/dcbParser/\"\n\n self.timeTagCal = [\"2016 05 02 15 05 0\", \"2016 05 03 0 0 0\"]\n self.dcbList = [[self.resourcePath +\n \"test_CAS0MGXRAP_20161220000_01D_01D_DCB.BSX\",\n \"2016 05 02 0 0 0\"],\n [self.resourcePath +\n \"test_CAS0MGXRAP_20161230000_01D_01D_DCB.BSX\",\n \"2016 05 03 0 0 0\"]]\n\n self.dcbList_fileNotFound = [[self.resourcePath +\n \"CAS0MGXRAP_2016122_01D_01D_DCB.BSX\",\n \"2016 05 02 0 0 0\"],\n [self.resourcePath +\n \"CAS0MGXRAP_20161230000_01D_01D_DCB.BSX\",\n \"2016 05 03 0 0 0\"]]\n\n \"\"\"\n Test should sort and retrun the high priority\n frequency value 'C1X-C5X' in give list\n \"\"\"\n self.dcbListSortTest = []\n self.dcbListSortTest.append(DcbE1E5('E08', '16:122:00000',\n '16:123:00000', 'C1Q-C5X',\n '13.3350'))\n self.dcbListSortTest.append(DcbE1E5('E08', '16:122:00000',\n '16:123:00000', 'C1X-C5Q',\n '4.5980'))\n self.dcbListSortTest.append(DcbE1E5('E08', 
'16:122:00000',\n '16:123:00000', 'C1X-C5X',\n '13.3350'))\n self.dcbListSortTest.append(DcbE1E5('E08', '16:122:00000',\n '16:123:00000', 'C1Q-C5Q',\n '4.5980'))\n\n \"\"\"\n Test should sort and retrun the high priority\n frequency value from the list with the test value\n without the highest priority data and ordered randomly\n \"\"\"\n self.e1E5aFreqPrioListTest2 = []\n self.e1E5aFreqPrioListTest2.append(DcbE1E5('E08', '16:122:00000',\n '16:123:00000', 'C1Q-C5X',\n '13.3350'))\n self.e1E5aFreqPrioListTest2.append(DcbE1E5('E08', '16:122:00000',\n '16:123:00000', 'C1X-C5Q',\n '4.5980'))\n self.e1E5aFreqPrioListTest2.append(DcbE1E5('E08', '16:122:00000',\n '16:123:00000',\n 'C1Q-C5Q', '4.5980'))\n\n self.e1E5aFreqPrioList = ['C1X-C5X', 'C1Q-C5Q', 'C1Q-C5X', 'C1X-C5Q']\n\n def testDcbParserDailySuccess(self):\n dcbDailyPerSatEpoch, timeTagCalEpoch = self.classToTest.dcbParser(\n self.timeTagCal, self.dcbList, self.prnIdList[0])\n assert timeTagCalEpoch == self.timeTagCal\n\n def testDcbDataExtractorSuccess(self):\n dcb_data = self.classToTest.dcbDataExtractor(\n self.dcbList, self.timeTagCal, self.prnIdList)\n assert len(dcb_data) > 0\n\n @raises(Exception)\n def testDcbDataExtractorNoDCBValueException(self):\n assert self.classToTest.dcbDataExtractor(\n self.dcbList, self.timeTagCal, self.prnIdListError) is False\n\n @raises(Exception)\n def testDcbDataExtractorInputDcbFileNotFound(self):\n inputDcbFile = self.classToTest.dcbDataExtractor(\n self.dcbList_fileNotFound, self.timeTagCal, self.prnIdList)\n assert inputDcbFile is None\n\n def testSortOrder(self):\n sortedData = self.classToTest.filterDCBFromFreqPrioList(\n self.dcbListSortTest, self.e1E5aFreqPrioList)\n assert sortedData.FRQUENCY == self.e1E5aFreqPrioList[0]\n\n def testSortOrderTest2(self):\n sortedData = self.classToTest.filterDCBFromFreqPrioList(\n self.e1E5aFreqPrioListTest2, self.e1E5aFreqPrioList)\n assert sortedData.FRQUENCY == self.e1E5aFreqPrioList[1]\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5765895843505859, "alphanum_fraction": 0.5794797539710999, "avg_line_length": 43.64516067504883, "blob_id": "4ce0d633311664ef6471a8d8cc31b5c19970cf11", "content_id": "1ffca8ed8a55c7ce66d1113931ea52c17380ffa9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1384, "license_type": "no_license", "max_line_length": 96, "num_lines": 31, "path": "/data/sysPhaseShift.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass SysPhaseShift:\n \"\"\"\n Assigned to Class variable of ObservationsDataHeader\n SYS_PHASE_SHIFTS (LIST) (SYS / PHASE SHIFT)\n Phase shift correction used to generate phases consistent w/r\n to cycle shifts\n Parameters:\n SATELLITE_SYSTEM - Satellite system (G/R/E/J/C/I/S)\n CARRIER_PHASE_OBSERVATION_CODE - Carrier phase observation code:\n Type\n Band\n Attribute\n CORRECTION_APPLIED - Correction applied (cycles)\n NUMBER_OF_SATELLITES - Number of satellites involved 0 or blank: All\n satellites of system\n LIST_OF_SATELLITES - List of satellites\n Use continuation line(s) for more\n than 10 satellites.\n \"\"\"\n\n def __init__(self, satelliteSystem=\"\", carrierPhaseObservationCode=\"\", correctionApplied=\"\",\n numberOfSatellites=\"\", listOfSatellites=\"\"):\n self.SATELLITE_SYSTEM = satelliteSystem\n self.CARRIER_PHASE_OBSERVATION_CODE = carrierPhaseObservationCode\n self.CORRECTION_APPLIED = correctionApplied\n self.NUMBER_OF_SATELLITES = 
numberOfSatellites\n self.LIST_OF_SATELLITES = listOfSatellites\n" }, { "alpha_fraction": 0.5782113671302795, "alphanum_fraction": 0.595772385597229, "avg_line_length": 37.924049377441406, "blob_id": "ab9668a21fa6b25d650e69dbb925499026b62b34", "content_id": "3d72ce7595790eb7cc4469e1d6901ebe973948a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3075, "license_type": "no_license", "max_line_length": 82, "num_lines": 79, "path": "/usergridgenerator/userGridGenerator.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport mpmath as math\nfrom kpi.helper.common import Common\n\n\nclass UserGridGenerator:\n \"\"\"\n This module generates the matrix of user positions located\n on a worldwide grid on earth\n \"\"\"\n log = Common.getLogger(__name__)\n # pParameters to define the earth area for the user grid\n gridSpace = 5\n maxLatitude = 90\n minLatitude = -85\n maxLongitude = 185\n minLongitude = -175\n # In meters\n semiMajorAxisRadius = 6378137.0\n firstEccentricity = 0.08181919\n firstEccentricitySquare = firstEccentricity ** 2\n totalOfLocations = 2520\n totalReturnParameters = 5\n height = 0\n\n def generateUserGrid(self):\n \"\"\"\n Parameters:\n No input parameters\n Return:\n userposition matrix\n Matrix Data:\n Loc_lon : Contains per user location longitude value in deg\n Loc_lat : Contains per user location latitude value in deg\n Loc_X : Contains per user location X position in ECEF in meters\n Loc_Y : Contains per user location Y position in ECEF in meters\n Loc_Z : Contains per user location Z position in ECEF in meters\n \"\"\"\n count = 0\n userPositions = [[0 for x in range(self.totalReturnParameters)]\n for y in range(self.totalOfLocations)]\n # Loop over latitude (-85 to 85) with gridspace of 5\n for latitude in range(self.minLatitude, self.maxLatitude,\n self.gridSpace):\n # Loop over longitude (-175 to 180) with gridspace of 5\n for longitude in range(self.minLongitude, self.maxLongitude,\n self.gridSpace):\n\n perLocationData = [longitude, latitude] + \\\n self.getECEFPositions(latitude, longitude)\n userPositions[count] = perLocationData\n count = count + 1\n return userPositions\n\n def getECEFPositions(self, latitude, longitude):\n \"\"\"\n This method return the X, Y and Z position in ECEF in meters\n per latitude and longitude\n\n Parameters:\n Latitude : user location latitude value in deg\n Longitude: user location longitude value in deg\n \"\"\"\n\n # Vertical radius of curvature\n radCurvN = self.semiMajorAxisRadius / \\\n (math.sqrt(1 - (self.firstEccentricitySquare *\n math.sin(math.radians(latitude))**2)))\n # In meters\n xCoordinate = (radCurvN + self.height) * \\\n (math.cos(math.radians(latitude)) * math.cos(math.radians(longitude)))\n # In meters\n yCoordinate = (radCurvN + self.height) * \\\n (math.cos(math.radians(latitude)) * math.sin(math.radians(longitude)))\n # In meters\n zCoordinate = ((radCurvN * (1 - self.firstEccentricitySquare)\n ) + self.height) * math.sin(math.radians(latitude))\n return [xCoordinate, yCoordinate, zCoordinate]\n" }, { "alpha_fraction": 0.5991984009742737, "alphanum_fraction": 0.617234468460083, "avg_line_length": 20.69565200805664, "blob_id": "289699e49a91e8fffbd99047f4fd2180a25e3f85", "content_id": "9a21ed937a4edcdd14d097ffa00371ff599f9472", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 499, "license_type": "no_license", "max_line_length": 91, 
"num_lines": 23, "path": "/sisre/sisrestatistics.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author: Thomas Schneider\n# Date : 13.09.2016\n\nimport sys\nfrom kpi.helper.common import Common\nfrom kpi.sisre.svattitude import SVAttitude\n\n\nclass SISREStatistics:\n \"\"\"\n calculates signal-in-space range error for individual satellites orbit and clock error.\n \"\"\"\n\n def __init__(self, date):\n self.date = date\n\n def computeSISRE(self, vecErr, clkErr, S):\n return \"\"\n\n def __str__(self):\n return __name__ + \"(\" + date + \")\"\n" }, { "alpha_fraction": 0.5494057536125183, "alphanum_fraction": 0.5584040880203247, "avg_line_length": 32.46590805053711, "blob_id": "382622962077c245fd2745e68ce1b388533003c5", "content_id": "ed5c419200e9ae617f16822687e17fd61bae0c26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5893, "license_type": "no_license", "max_line_length": 84, "num_lines": 176, "path": "/sisre/svattitude.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport logging\nimport numpy as np\nimport sys\nfrom mpmath import *\nfrom scipy import mat, dot, matmul\nfrom decimal import Decimal\nfrom novas import compat as novas\nfrom novas.compat import eph_manager\nfrom novas.compat import *\nfrom novas.compat import solsys\nfrom novas import novaslib\nfrom kpi.helper.common import Common\nfrom kpi.data.svRefPVTCoM import SvRefPVTCoM\n\njd_start, jd_end, number = eph_manager.ephem_open()\n\n\nclass SVAttitude:\n \"\"\"\n calculates the sun vector on a given date and creates unit vector matrices\n for a satellites velocity S and position U in ECEF coordinates\n usage:\n sv = SVAttitude(date)\n sv.computeRotationAndSpeedMatrices(gals)\n \"\"\"\n log = Common.getLogger(__name__)\n jd = 0.0 # julian date\n rsun = () # normed unit sun vector in ECEF\n\n def __init__(self, year, month, day, hours, minutes, seconds, leapseconds):\n \"\"\"\n calculates the sun-vector in ECEF\n \"\"\"\n # TODO: use leapseconds\n date = (year, month, day, hours, minutes, seconds + leapseconds)\n jd = self.julianDaysUTC(year, month, day, hours,\n minutes, seconds, leapseconds)\n GHAM = self.transGHAM(self.siderealGAST(jd))\n rsECI = self.heliosECI(jd)\n rsun = self.heliosECEF(GHAM, rsECI)\n self.jd = jd\n self.rsun = rsun\n self.log.info('Julian Date: %s ==> %s', SVAttitude.toString(\n '-', date), jd)\n self.log.debug('GHAM:\\n%s', GHAM)\n self.log.debug('Sun-Vector-ECI : %s', rsECI)\n self.log.info('Sun-Vector-ECEF : %s', rsun)\n\n def julianDaysUTC(self, year, month, day, hours, minutes, seconds, leapseconds):\n sec = seconds - leapseconds\n if sec < 0:\n minutes -= 1\n sec += 60\n time = hours + minutes / 60 + sec / 3600\n return novas.julian_date(year, month, day, hour=time)\n\n def toString(sep, pars):\n s = \"\"\n for x in pars:\n s += str(x) + sep\n return s\n\n def siderealtimeGWM(self, jd):\n return novas.sidereal_time(jd, 0, 0)\n\n def nutationAngles(self, jd):\n return novas.nutation_angles(jd)[0]\n\n def equinox(self, jd):\n mo = novas.mean_obliq(jd)\n return self.nutationAngles(jd) * cos(mo)\n\n def siderealGAST(self, jd):\n GAST = (self.siderealtimeGWM(jd) + self.equinox(jd)) % (pi * 2)\n return GAST\n\n def transGHAM(self, GAST):\n GHAM = mat([[cos(GAST), sin(GAST), 0],\n [-sin(GAST), cos(GAST), 0], [0, 0, 1]])\n return GHAM\n\n def heliosECI(self, jd):\n # error in c-call: python needs float, c needs 
iterable\n # r = solsys.solarsystem_hp(jd, 10, 2)\n r, v = solsys.solarsystem(jd, 10, 2)\n return r\n\n def enorm(self, v):\n n = np.linalg.norm(v)\n if n == 0:\n # Exception('given vector has length zero:', v)\n return v\n return v / n\n\n def heliosECEF(self, GHAM, rsun):\n v = GHAM.dot(rsun)\n return self.enorm(v)\n\n def computeRotationAndSpeedMatrice(self, gal):\n \"\"\"\n computes the speed rotation matrice S and position rotation matrice U\n gal: array having 3 position and 3 velocity values.\n \"\"\"\n # TODO: mit Maxi klären, ob wir einfache Tupel-Strukturen verwenden\n # TODO: mit Maxi klären, warum wir in den Strukturen mit strings\n # arbeiten\n if isinstance(gal, SvRefPVTCoM):\n #TODO: dürfen die Positionen undefiniert sein?\n if gal.X_com is None:\n #Exception(\"position parameters x, y, z must be filled!\")\n gal.X_com = nan\n if gal.Y_com is None:\n gal.Y_com = nan\n if gal.Z_com is None:\n gal.Z_com = nan\n x, y, z = mpf(gal.X_com), mpf(\n gal.Y_com), mpf(gal.Z_com)\n if not gal.Vx_com is None:\n vx, vy, vz = mpf(gal.Vx_com), mpf(\n gal.Vy_com), mpf(gal.Vz_com)\n else:\n vx, vy, vz = nan, nan, nan\n else:\n x, y, z = gal[0], gal[1], gal[2]\n vx, vy, vz = gal[3], gal[4], gal[5]\n\n r = np.array([x, y, z]).T\n if isnan(vx) or isnan(vy) or isnan(vz):\n v = (0, 0, 0) # TODO: use velocity from broadcast ephemeris\n else:\n v = np.array([vx, vy, vz]).T\n\n # do the main calculation...\n # TODO: use numpy.linalg.eig\n e1 = self.enorm(r) # ecl2equ_ve(self.jd, gal)\n e3 = self.enorm(np.cross(r, v))\n e2 = np.cross(e3, e1)\n S = (e1, e2, e3)\n self.log.debug(\"%s, %s, %s ==> %s\", x, y, z, S)\n\n # do the main calculation...\n ez = -e1 # ecl2equ_ve(self.jd, gal)\n ey = self.enorm(np.cross(self.rsun, r))\n ex = np.cross(ey, ez)\n U = (ex, ey, ez)\n self.log.debug(\"%s, %s, %s ==> %s\", vx, vy, vz, U)\n\n return U, S\n\n def computeRotationAndSpeedMatrices(self, gals):\n \"\"\"\n main compuation method. 
using the given 2-dim array to calculate\n all S and U matrices for given satellite posistions and velocities.\n delegates the calculation per satellite \n to computeRotationAndSpeedMatrice(gal)\n gals: position and velocity values for several satellites\n \"\"\"\n US = ()\n for gal in gals:\n US += self.computeRotationAndSpeedMatrice(gal)\n return US\n\n def reached(self, endDate):\n \"\"\"\n checks, if the internal julian day is equal or after the given\n endDate.\n endDate: calendar values in an array [year, month, day, hours]\n \"\"\"\n end = self.julianDaysUTC(*endDate, 0)\n return self.jd >= end\n\n def __str__(self):\n return \"jd: \" + self.jd + \" rsun: \" + self.rsun\n" }, { "alpha_fraction": 0.5546666383743286, "alphanum_fraction": 0.6070476174354553, "avg_line_length": 40.66666793823242, "blob_id": "277c4f13158ae55d428b20e4f7679d8103af2d4f", "content_id": "cf7f5838837f69dd2436e303c68892cf9dd64113", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5250, "license_type": "no_license", "max_line_length": 87, "num_lines": 126, "path": "/test/antennaOffsetFilesDecoderTest.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport unittest\nimport sys\nfrom nose.tools import *\nfrom kpi.antennaOffsetFilesDecoderModule.antennaOffsetParser import AntennaOffsetParser\n\n\nclass testAntennaOffsetParser(unittest.TestCase):\n\n def setUp(self):\n \"\"\"\n setup call, runs before each test\n \"\"\"\n\n self.confiFile = \"test/resources/Configuration.cfg\"\n self.classToTest = AntennaOffsetParser()\n self.resourcePathTest = \"test/resources/antennaOffsetParserModule/\"\n self.prnIdList = [\"E08\", \"E09\", \"E11\", \"E12\",\n \"E14\", \"E18\", \"E19\", \"E20\",\n \"E22\", \"E24\", \"E26\", \"E30\", \"E51\", \"E52\"]\n self.prnIdTempList = [\"E08\"]\n self.prnIdListNotExist = [\"AAA\"]\n self.timeTagCal = [\"2016 05 01 15 05 0\", \"2016 05 02 0 0 0\",\n \"2016 05 03 15 05 0\", \"2016 05 04 15 05 0\",\n \"2016 05 05 15 05 0\", \"2016 05 06 15 05 0\",\n \"2016 05 07 15 05 0\", \"2016 05 08 15 05 0\",\n \"2016 05 09 15 05 0\", \"2016 05 10 15 05 0\",\n \"2016 05 11 15 05 0\", \"2016 05 12 15 05 0\",\n \"2016 05 13 15 05 0\", \"2016 05 14 15 05 0\",\n \"2016 05 15 15 05 0\", \"2016 05 16 15 05 0\",\n \"2016 05 17 15 05 0\"]\n self.apcOffRefFile = self.resourcePathTest + \"test_igs08_1887.atx\"\n self.apcOffBceFile = self.resourcePathTest + \"test_igs08_1887_bce.atx\"\n self.apcTypeTagNotExist = self.resourcePathTest + \\\n \"test_format_exp_type_tag_Not_exist.atx\"\n self.apcNumFreqTag = self.resourcePathTest + \\\n \"test_format_exp_num_freq_notExist.atx\"\n self.apcOffRefFileNotExist = (self.resourcePathTest +\n \"test_igs08_1887_notexist.atx\")\n self.apcGregoruanFormatTest = self.resourcePathTest + \\\n \"test_gregorian_format.atx\"\n\n def testApcDataExtractor(self):\n \"\"\"\n Test for single PRN with all timetagcals\n The result should return pro PNR pro timetagcal\n\n Test with single Data (prnIdTempList = [\"E08\"])\n \"\"\"\n apc_nav, apc_pro = self.classToTest.antennaOffsetParser(\n self.apcOffRefFile, self.apcOffBceFile)\n apcOffRef, apcOffBce = self.classToTest.apcDataExtractor(\n apc_nav, apc_pro, self.timeTagCal, self.prnIdTempList)\n assert len(apcOffRef) == len(self.timeTagCal) * \\\n len(self.prnIdTempList)\n assert len(apcOffBce) == len(self.timeTagCal) * \\\n len(self.prnIdTempList)\n\n def testApcDataExtractorListPrn(self):\n \"\"\"\n Test for single PRN with 
all timetagcals\n The result should return pro PNR pro timetagcal\n\n Test with List Data (prnIdList = [\"E08\"])\n \"\"\"\n apc_nav, apc_pro = self.classToTest.antennaOffsetParser(\n self.apcOffRefFile, self.apcOffBceFile)\n apcOffRef, apcOffBce = self.classToTest.apcDataExtractor(\n apc_nav, apc_pro, self.timeTagCal, self.prnIdList)\n assert len(apcOffRef) == len(self.timeTagCal) * \\\n len(self.prnIdList)\n assert len(apcOffBce) == len(self.timeTagCal) * \\\n len(self.prnIdList)\n\n def testApcDataExtractorNonExistingPrn(self):\n \"\"\"\n Test for single PRN with all timetagcals\n The result should return pro PNR pro timetagcal\n\n Test with invalid Data (prnIdList = [\"AAA\"])\n \"\"\"\n apc_nav, apc_pro = self.classToTest.antennaOffsetParser(\n self.apcOffRefFile, self.apcOffBceFile)\n apcOffRef, apcOffBce = self.classToTest.apcDataExtractor(\n apc_nav, apc_pro, self.timeTagCal, self.prnIdListNotExist)\n assert len(apcOffRef) == len(self.timeTagCal) * \\\n len(self.prnIdListNotExist)\n assert len(apcOffBce) == len(self.timeTagCal) * \\\n len(self.prnIdListNotExist)\n\n def testAntennaOffsetParserFunction(self):\n # Test parsing of files and list output\n apc_nav, apc_pro = self.classToTest.antennaOffsetParser(\n self.apcOffRefFile, self.apcOffBceFile)\n assert len(apc_pro) > 1\n assert len(apc_nav) > 1\n\n @raises(Exception)\n def testFileNotExist(self):\n \"\"\"\n Raises Exception if file is not found\n \"\"\"\n apcPro, apcNav = self.classToTest.antennaOffsetParser(\n self.apcOffRefFileNotExist, self.apcOffBceFile)\n\n @raises(Exception)\n def testFormatExceptionNumFreqTag(self):\n # Test type tag after start antenna tag\n apc_nav, apc_pro = self.classToTest.antennaOffsetParser(\n self.apcNumFreqTag, self.apcOffBceFile)\n\n @raises(Exception)\n def testValidFromTagNotExist(self):\n # Test parsing of files and list output\n apc_nav, apc_pro = self.classToTest.antennaOffsetParser(\n self.apcTypeTagNotExist, self.apcOffBceFile)\n\n @raises(Exception)\n def testGregorianFormat(self):\n # Test if there are 6 calender elements in Date\n self.classToTest.antennaOffsetParser(\n self.apcGregoruanFormatTest, self.apcOffBceFile)\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.6842105388641357, "alphanum_fraction": 0.6973684430122375, "avg_line_length": 41.22222137451172, "blob_id": "9eb41c2217d636813ea2dc9b9df5a053b4d3a4f3", "content_id": "f35fc49e5542fb9d18d9231ffedd2e97bc4fee1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1140, "license_type": "no_license", "max_line_length": 79, "num_lines": 27, "path": "/data/glonassCodPhsBis.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass GlonassCodPhsBis:\n \"\"\"\n Author: Bhaskar Shankar (IDV-AG)\n Date : 05 Sept 2016\n\n Assigned to Class variable of ObservationsDataHeader\n VAR - GLONASS_COD_PHS_BIS\n Doc:\n GLONASS Phase bias correction used to align code and phase observations\n GLONASS signal identifier : C1C and Code Phase bias correction (metres)\n GLONASS signal identifier : C1P and Code Phase bias correction (metres)\n GLONASS signal identifier : C2C and Code Phase bias correction (metres)\n GLONASS signal identifier : C2P and Code Phase bias correction (metres)\n Note: If the GLONASS code phase bias values are unknown then all fields\n in the record are left blank\n Parameters:\n GLONASS_SIGNAL_IDENTIFIER - GLONASS signal identifier(C1C,C2C,C1P,C2P)\n CODE_PHASE_BIAS_CORRECTION - 
Code Phase bias correction (metres)\n \"\"\"\n\n def __init__(self, glonassSignalIdentifier=\"\", codePhaseBiasCorrection=\"\"):\n self.GLONASS_SIGNAL_IDENTIFIER = glonassSignalIdentifier\n self.CODE_PHASE_BIAS_CORRECTION = codePhaseBiasCorrection\n" }, { "alpha_fraction": 0.44247788190841675, "alphanum_fraction": 0.464749276638031, "avg_line_length": 48.67033004760742, "blob_id": "58a9b96b5212489a33ac38ce263f90ad14e40dce", "content_id": "9bd3300a39629483b7d1e1ef8200468c0880b337", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13560, "license_type": "no_license", "max_line_length": 79, "num_lines": 273, "path": "/broadcastnavigationdecoder/rinex3NAVParser.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom decimal import *\nimport sys\nfrom kpi.data.navHeaderModel import NavHeaderModel\nfrom kpi.data.navDataModel import NavDataModel\nfrom kpi.data.timeSystemCorrectionModel import TimeSystemCorrectionModel\nfrom kpi.helper.common import Common\n\n\nclass BRDMParser:\n\n log = Common.getLogger('Rinex3NAVParser')\n\n def rinex3navParser(self, bceList):\n self.log.info(\"Starting RINEX 3 NAV parser...\")\n NAV_datasets = []\n NAV_headers = []\n for rinexFile in bceList:\n rinexData = open(rinexFile)\n navHeaderModel = NavHeaderModel()\n navDataModel = None\n if rinexData is None:\n self.log.error(\"RINEX %s File was not found.\", rinexFile)\n isHeader = True\n isGalileoData = False\n timeSysCorrections = []\n comments = []\n brdCastLine = 0\n self.log.info(\"Parse header...\")\n for line in rinexData:\n if isHeader:\n if \"END OF HEADER\" in line:\n isHeader = False\n self.log.info(\"Header parsed.\")\n self.log.info(\"Parse broadcastfields...\")\n else:\n self.parseHeaderFields(\n navHeaderModel, line, comments, timeSysCorrections)\n else:\n # parse broadcast fields\n # only boardcast fields for galileo satellites are needed\n # if parser started, but there is data for next satellite\n # before broadcast is at the end. warning is logged.\n if line.startswith(\"E\"):\n isGalileoData = True\n if brdCastLine >= 7:\n NAV_datasets.append(navDataModel)\n elif brdCastLine < 7 and brdCastLine > 0:\n self.log.warning(\"Nav Rinex Data for %s %s \" +\n \" in wrong format.\" +\n \"Not 7 Broadcast fields.\",\n navDataModel.PRN_ID,\n navDataModel.TIME_TAG)\n brdCastLine = 0\n navDataModel = NavDataModel()\n elif not line.startswith(\"E\") and not line.startswith(\" \"):\n # end of the Galileo Data block\n # if parser started, but there is data for next\n # satellite\n # before broadcast is at the end. warning is logged.\n if brdCastLine >= 7:\n brdCastLine = 0\n NAV_datasets.append(navDataModel)\n elif (isGalileoData and\n brdCastLine < 7 and brdCastLine > 0):\n self.log.warning(\"Nav Rinex Data for %s %s \" +\n \" in wrong format.\" +\n \"Not 7 Broadcast fields.\",\n navDataModel.PRN_ID,\n navDataModel.TIME_TAG)\n isGalileoData = False\n if isGalileoData:\n self.parseBroadcastFields(\n brdCastLine, navDataModel, line)\n brdCastLine += 1\n # if parser started, but there is data for next satellite\n # before broadcast is at the end. 
warning is logged.\n if isGalileoData and brdCastLine >= 7:\n NAV_datasets.append(navDataModel)\n elif (isGalileoData and\n brdCastLine < 7 and brdCastLine > 0):\n self.log.warning(\"Nav Rinex Data for %s %s \" +\n \" in wrong format.\" +\n \"Not 7 Broadcast fields.\",\n navDataModel.PRN_ID,\n navDataModel.TIME_TAG)\n self.log.info(\"Broadcast fields parsed.\")\n if isHeader and navHeaderModel.FORMAT_VERSION is \"\":\n self.log.error(\"No header data in RINEX File %s\",\n rinexFile)\n elif isHeader:\n self.log.error(\"No 'End of header' Tag is in RINEX File %s\",\n rinexFile)\n if not isHeader and len(NAV_datasets) < 1:\n self.log.error(\"No galileo broadcast data in RINEX File %s\",\n rinexFile)\n # set header data\n navHeaderModel.TIME_SYSTEM_CORRECTION = timeSysCorrections\n navHeaderModel.COMMENT = comments\n NAV_headers.append(navHeaderModel)\n rinexData.close()\n\n return NAV_headers, NAV_datasets\n\n def parseBroadcastFields(self, brdCastLine, navDataModel, line):\n \"\"\"\n parsed all the broadcast fields. brdCastLine counts the line\n parameter:\n brdCastLine int: counts the line for the galileo data\n navDataModel NavDataModel: object data of the satellite\n line string: actual line to parse\n \"\"\"\n if brdCastLine == 0:\n fields = line.split()\n navDataModel.SATELLITE_SYSTEM = fields[0][0:1]\n navDataModel.PRN_ID = fields[0]\n navDataModel.EPOCH = fields[1]\n navDataModel.TIME_TAG = fields[\n 2] + \" \" + fields[3] + \" \" + fields[\n 4] + \" \" + fields[5] + \" \" + fields[6]\n navDataModel.SV_CLOCK_BIAS = fields[7]\n navDataModel.SV_CLOCK_DRIFT = fields[8]\n navDataModel.SV_CLOCK_DRIFT_RATE = fields[9]\n elif brdCastLine == 1:\n if (len(line[4:23].replace(\" \", \"\")) or\n len(line[23:42].replace(\" \", \"\")) or\n len(line[42:61].replace(\" \", \"\")) or\n len(line[61:80].replace(\" \", \"\"))) < 1:\n self.log.warning(\"Nav Rinex Data for %s %s in wrong format.\" +\n \" Check Broadcast Orbit %i\",\n navDataModel.PRN_ID,\n navDataModel.TIME_TAG, brdCastLine)\n navDataModel.IOD_NAV = line[4:23].replace(\" \", \"\")\n navDataModel.CRS = line[23:42].replace(\" \", \"\")\n navDataModel.DELTA_N = line[42:61].replace(\" \", \"\")\n navDataModel.M_0 = line[61:80].replace(\" \", \"\")\n elif brdCastLine == 2:\n if (len(line[4:23].replace(\" \", \"\")) or\n len(line[23:42].replace(\" \", \"\")) or\n len(line[42:61].replace(\" \", \"\")) or\n len(line[61:80].replace(\" \", \"\"))) < 1:\n self.log.warning(\"Nav Rinex Data for %s %s in wrong format.\" +\n \" Check Broadcast Orbit %i\",\n navDataModel.PRN_ID,\n navDataModel.TIME_TAG, brdCastLine)\n navDataModel.CUC = line[4:23].replace(\" \", \"\")\n navDataModel.ECCENTRICITY = line[\n 23:42].replace(\" \", \"\")\n navDataModel.CUS = line[42:61].replace(\" \", \"\")\n navDataModel.SQRT_A = line[61:80].replace(\" \", \"\")\n elif brdCastLine == 3:\n if (len(line[4:23].replace(\" \", \"\")) or\n len(line[23:42].replace(\" \", \"\")) or\n len(line[42:61].replace(\" \", \"\")) or\n len(line[61:80].replace(\" \", \"\"))) < 1:\n self.log.warning(\"Nav Rinex Data for %s %s in wrong format.\" +\n \" Check Broadcast Orbit %i\",\n navDataModel.PRN_ID,\n navDataModel.TIME_TAG, brdCastLine)\n navDataModel.TOE = line[4:23].replace(\" \", \"\")\n navDataModel.CIC = line[23:42].replace(\" \", \"\")\n navDataModel.OMEGA_0 = line[42:61].replace(\" \", \"\")\n navDataModel.CIS = line[61:80].replace(\" \", \"\")\n elif brdCastLine == 4:\n if (len(line[4:23].replace(\" \", \"\")) or\n len(line[23:42].replace(\" \", \"\")) or\n len(line[42:61].replace(\" 
\", \"\")) or\n len(line[61:80].replace(\" \", \"\"))) < 1:\n self.log.warning(\"Nav Rinex Data for %s %s in wrong format.\" +\n \" Check Broadcast Orbit %i\",\n navDataModel.PRN_ID,\n navDataModel.TIME_TAG, brdCastLine)\n navDataModel.I_0 = line[4:23].replace(\" \", \"\")\n navDataModel.CRC = line[23:42].replace(\" \", \"\")\n navDataModel.OMEGA = line[42:61].replace(\" \", \"\")\n navDataModel.OMEGA_DOT = line[\n 61:80].replace(\" \", \"\")\n elif brdCastLine == 5:\n if (len(line[4:23].replace(\" \", \"\")) or\n len(line[23:42].replace(\" \", \"\")) or\n len(line[42:61].replace(\" \", \"\"))) < 1:\n self.log.warning(\"Nav Rinex Data for %s %s in wrong format.\" +\n \" Check Broadcast Orbit %i\",\n navDataModel.PRN_ID,\n navDataModel.TIME_TAG, brdCastLine)\n navDataModel.IDOT = line[4:23].replace(\" \", \"\")\n navDataModel.DATA_SOURCES = Decimal(line[\n 23:42].replace(\" \", \"\"))\n navDataModel.GAL_WEEK = line[\n 42:61].replace(\" \", \"\")\n elif brdCastLine == 6:\n if (len(line[4:23].replace(\" \", \"\")) or\n len(line[23:42].replace(\" \", \"\")) or\n len(line[42:61].replace(\" \", \"\")) or\n len(line[61:80].replace(\" \", \"\"))) < 1:\n self.log.warning(\"Nav Rinex Data for %s %s in wrong format.\" +\n \" Check Broadcast Orbit %i\",\n navDataModel.PRN_ID,\n navDataModel.TIME_TAG, brdCastLine)\n navDataModel.SISA_SIGNAL = line[\n 4:23].replace(\" \", \"\") if len(line[\n 4:23].replace(\" \", \"\")) > 0 else -1.0\n navDataModel.SV_HEALTH = Decimal(line[\n 23:42].replace(\" \", \"\"))\n navDataModel.BGD_E5_A = line[\n 42:61].replace(\" \", \"\")\n navDataModel.BGD_E5_B = line[\n 61:80].replace(\" \", \"\")\n elif brdCastLine == 7:\n if (len(line[4:23].replace(\" \", \"\"))) < 1:\n self.log.warning(\"Nav Rinex Data for %s %s in wrong format.\" +\n \"Check Broadcast Orbit %i\",\n navDataModel.PRN_ID,\n navDataModel.TIME_TAG, brdCastLine)\n else:\n navDataModel.TTOM = line[4:23].replace(\" \", \"\")\n\n def parseHeaderFields(self, navHeaderModel,\n line, comments, timeSysCorrections):\n \"\"\"\n parsed the header fields.\n in each line there is a description label like \"COMMENT\"\n navHeaderModel NavHeaderModel: actual navHeaderModel\n line string: actual line to parse\n comments [string]: list where all the comments stored\n timeSysCorrections [timeSysCorrections]: list where all the\n timeSysCorrections are stored\n \"\"\"\n if \"RINEX VERSION / TYPE\" in line:\n fields = list(filter(None, line.split(\" \")))\n navHeaderModel.FORMAT_VERSION = fields[\n 0].replace(\" \", \"\")\n navHeaderModel.FILE_TYPE = fields[1]\n navHeaderModel.SATELLITE_SYSTEM = fields[2]\n elif \"PGM / RUN BY / DATE\" in line:\n fields = list(filter(None, line.split()))\n navHeaderModel.PROGRAMM_NAME = fields[0]\n navHeaderModel.AGENCY_NAME = fields[1]\n navHeaderModel.TIME_TAG = fields[2] + \" \" + fields[3]\n navHeaderModel.TIME_ZONE = fields[4]\n elif \"COMMENT\" in line:\n comments.append(line.replace(\"COMMENT\", \"\"))\n elif \"TIME SYSTEM CORR\" in line:\n fields = list(filter(None, line.split(\" \")))\n correlation = (fields[1], fields[2])\n # special case. 
correlations could be split into\n # one field when: correlation a1 is minus\n # -6.1118043959e-10-5.773159728e-15\n if len(fields[1]) > 17:\n correlation = fields[1][0:len(\n fields[1]) - 16], fields[1][len(\n fields[1]) - 16:]\n i = len(fields) - 1\n while i > 1:\n if i == len(fields) - 1:\n fields.append(fields[i])\n else:\n fields[i + 1] = fields[i]\n i -= 1\n # get tscS, default is \"SNN\"\n tscS = fields[5] if \"TIME\" not in fields[5] else \"SNN\"\n # get tscUTCId default is \"0\"\n tscUTCId = fields[\n 6] if \"SYSTEM\" not in fields[6] else \"0\"\n tscModel = TimeSystemCorrectionModel(fields[0],\n correlation[0],\n correlation[1],\n fields[\n 3], fields[4],\n tscS,\n tscUTCId)\n timeSysCorrections.append(tscModel)\n" }, { "alpha_fraction": 0.5410199761390686, "alphanum_fraction": 0.5476718544960022, "avg_line_length": 22.736841201782227, "blob_id": "94bba5623f553ba67d4882668e5f8c643b5cf8ef", "content_id": "c71a746aaba93942e3a3b32756fdca507eed3ef3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 451, "license_type": "no_license", "max_line_length": 54, "num_lines": 19, "path": "/data/positionVectorXYZ.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass PositionVectorXYZ:\n \"\"\"\n Section: 5.6 : Observer Files Decoder Module\n Class varialbe at ObservationsDataHeader class for\n APPROX POSITION XYZ\n ANTENNA: DELTA X/Y/Z\n ANTENNA: ZERODIR XYZ\n CENTER OF MASS: XYZ\n\n \"\"\"\n\n def __init__(self, x=\"\", y=\"\", z=\"\"):\n self.X_POSITION = x\n self.Y_POSITION = y\n self.Z_POSITION = z\n" }, { "alpha_fraction": 0.5562372207641602, "alphanum_fraction": 0.6032719612121582, "avg_line_length": 29.5625, "blob_id": "eea9898a36bf36fdfdd9335e94a093c4c7b8027b", "content_id": "242be00ecad6fde060592745f5b12fa4d3967467", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 489, "license_type": "no_license", "max_line_length": 71, "num_lines": 16, "path": "/data/dcbData.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass DcbData:\n \"\"\"\n Class with timeTagCal per PNRID basis with DCB data for\n frequency categories - DCB_E1_E5a and DCB_E1_E5b\n TIME_TAG_CAL = Given Time Tag Cal\n DCB_E1_E5a, DCB_E1_E5b = instances of DCB_E1_E5 for frequencies\n \"\"\"\n\n def __init__(self, timeTagCal=\"\", dcbE1E5a=\"\", dcbE1E5b=\"\"):\n self.TIME_TAG_CAL = timeTagCal\n self.DCB_E1_E5a = dcbE1E5a\n self.DCB_E1_E5b = dcbE1E5b\n" }, { "alpha_fraction": 0.6210995316505432, "alphanum_fraction": 0.6285289525985718, "avg_line_length": 29.590909957885742, "blob_id": "a0309d3726ab66a527ace0b8bdf95ee81123b040", "content_id": "a4e18befd77e56c55c414e7157fb1916228b0f07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 673, "license_type": "no_license", "max_line_length": 72, "num_lines": 22, "path": "/data/dcbE1E5.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass DcbE1E5:\n \"\"\"\n Class for each retrun object with PNRID, biasStart and\n biasEnd in nanoseconds and frequency\n PNR_ID = The PRN ID\n BIAS_START = Differential Code BIAS Start\n BIAS_END = Differential Code BIAS End\n FRQUENCY = Type of Frequency\n ESTIMATED_VALUE = Difference between E1 and E5 in Seconds\n \"\"\"\n\n def __init__(self, pnrId=\"\", biasStart=\"\", 
biasEnd=\"\", frequency=\"\",\n estimatedValue=\"\"):\n self.PNR_ID = pnrId\n self.BIAS_START = biasStart\n self.BIAS_END = biasEnd\n self.FRQUENCY = frequency\n self.ESTIMATED_VALUE = estimatedValue\n" }, { "alpha_fraction": 0.5679113864898682, "alphanum_fraction": 0.5802215933799744, "avg_line_length": 38.626014709472656, "blob_id": "a3503e89c51f04666ec789a27ad8308db7bbbb78", "content_id": "a7387e46f4df61a2786fda7d10353801d95585e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4874, "license_type": "no_license", "max_line_length": 71, "num_lines": 123, "path": "/data/navDataModel.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass NavDataModel:\n \"\"\"\n nav data object. inherit all the data record variables extracted\n from RINEX file.\n Parameter:\n SATELLITE_SYSTEM system string: satellite system \"E\": Galileo\n PRN_ID string: satellite id\n EPOCH string: Toc - Time of Clock GALyear\n TIME_TAG string: month=\"\", day=\"\", hour=\"\", minute=\"\", second\n SV_CLOCK_BIAS string: SV clock bias (seconds) af0\n SV_CLOCK_DRIFT string: SV clock drift (sec/sec) af1\n SV_CLOCK_DRIFT_RATE string: SV clock drift rate (sec/sec2) af2\n Br.Orbit-5=\"\", data source=\"\", bits 8+9)\n IOD_NAV string: IODnav Issue of Data of the nav batch\n CRS string: Crs (meters)\n DELTA_N string: Delta n (radians/sec)\n M_0 string: M0 (radians)\n CUC string: Cuc (radians)\n ECCENTRICITY string: e Eccentricity\n CUS string: Cus (radians)\n SQRT_A string: sqrt(a) (sqrt(m))\n TOE string: Toe Time of Ephemeris (sec of GAL week)\n CIC string: Cic (radians)\n OMEGA_0 string: OMEGA0 (radians)\n CIS string: Cis (radians)\n I_0 string: i0 (radians)\n CRC string: Crc (meters)\n OMEGA string: omega (radians)\n OMEGA_DOT string: OMEGA DOT (radians/sec)\n IDOT string: IDOT (radians/sec)\n DATA_SOURCES Decimal: Data sources (FLOAT --> INTEGER)\n detail description in file:\n resources/references/definitin/documentsrinex303.pdv\n GAL_WEEK string: GAL Week # (to go with Toe)\n SISA_SIGNAL string: SISA Signal in space accuracy (meters)\n SV_HEALTH Decimal: SV health (FLOAT converted to INTEGER)\n detail description in file:\n resources/references/definitin/documentsrinex303.pdv\n BGD_E5_A string: BGD E5a/E1 (seconds)\n BGD_E5_B string: BGD E5b/E1 (seconds)\n TTOM string: Transmission time of message\n \"\"\"\n\n def __init__(self, satelliteSystem=\"\",\n prnId=\"\", epoch=\"\", timeTag=\"\",\n svClockBias=\"\", svClockDrift=\"\", svClockDriftRate=\"\",\n ioDnav=\"\", crs=\"\", deltaN=\"\", m0=\"\", cuc=\"\",\n eccentricity=\"\", cus=\"\", sqrtA=\"\",\n toeTime=\"\", cic=\"\", omega0=\"\", cis=\"\", i0=\"\",\n crc=\"\", omega=\"\", omegaDot=\"\",\n iDot=\"\", dataSources=\"\", galWeek=\"\",\n sisaSignal=\"-1.0\", svHealth=\"\", bgdE5a=\"\", bgdE5b=\"\",\n ttom=\"0.9999e+09\"):\n self.SATELLITE_SYSTEM = satelliteSystem\n self.PRN_ID = prnId\n self.EPOCH = epoch\n self.TIME_TAG = timeTag\n self.SV_CLOCK_BIAS = svClockBias\n self.SV_CLOCK_DRIFT = svClockDrift\n self.SV_CLOCK_DRIFT_RATE = svClockDriftRate\n self.IOD_NAV = ioDnav\n self.CRS = crs\n self.DELTA_N = deltaN\n self.M_0 = m0\n self.CUC = cuc\n self.ECCENTRICITY = eccentricity\n self.CUS = cus\n self.SQRT_A = sqrtA\n self.TOE = toeTime\n self.CIC = cic\n self.OMEGA_0 = omega0\n self.CIS = cis\n self.I_0 = i0\n self.CRC = crc\n self.OMEGA = omega\n self.OMEGA_DOT = omegaDot\n self.IDOT = iDot\n self.DATA_SOURCES = dataSources\n self.GAL_WEEK = 
galWeek\n self.SISA_SIGNAL = sisaSignal\n self.SV_HEALTH = svHealth\n self.BGD_E5_A = bgdE5a\n self.BGD_E5_B = bgdE5b\n self.TTOM = ttom\n\n def printMe(self):\n \"\"\"\n prints all the values of the navData\n \"\"\"\n print(\"model.SATELLITE_SYSTEM = \", self.SATELLITE_SYSTEM)\n print(\"model.EPOCH = \", self.EPOCH)\n print(\"model.TIME_TAG = \", self.TIME_TAG)\n print(\"model.SV_CLOCK_BIAS = \", self.SV_CLOCK_BIAS)\n print(\"model.SV_CLOCK_DRIFT = \", self.SV_CLOCK_DRIFT)\n print(\"model.SV_CLOCK_DRIFT_RATE = \", self.SV_CLOCK_DRIFT_RATE)\n print(\"model.IOD_NAV = \", self.IOD_NAV)\n print(\"model.CRS = \", self.CRS)\n print(\"model.DELTA_N = \", self.DELTA_N)\n print(\"model.M_0 = \", self.M_0)\n print(\"model.CUC = \", self.CUC)\n print(\"model.ECCENTRICITY = \", self.ECCENTRICITY)\n print(\"model.CUS = \", self.CUS)\n print(\"model.TOE = \", self.TOE)\n print(\"model.SQRT_A = \", self.SQRT_A)\n print(\"model.CIC = \", self.CIC)\n print(\"model.OMEGA_0 = \", self.OMEGA_0)\n print(\"model.CIS = \", self.CIS)\n print(\"model.I_0 = \", self.I_0)\n print(\"model.CRC = \", self.CRC)\n print(\"model.OMEGA = \", self.OMEGA)\n print(\"model.OMEGA DOT = \", self.OMEGA_DOT)\n print(\"model.IDOT = \", self.IDOT)\n print(\"model.DATA_SOURCES = \" + str(self.DATA_SOURCES))\n print(\"model.GAL_WEEK = \", self.GAL_WEEK)\n print(\"model.SISA_SIGNAL = \", self.SISA_SIGNAL)\n print(\"model.SV_HEALTH = \" + str(self.SV_HEALTH))\n print(\"model.BGD_E5_A = \", self.BGD_E5_A)\n print(\"model.BGD_E5_B = \", self.BGD_E5_B)\n print(\"model.TTOM = \", self.TTOM)\n" }, { "alpha_fraction": 0.5147010087966919, "alphanum_fraction": 0.5364473462104797, "avg_line_length": 46.31233596801758, "blob_id": "58c4cfe49bee7ebf1b22d0d840895bd0e7e49af2", "content_id": "b721d68a8236534497319d7e55aba81a70ba35a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18026, "license_type": "no_license", "max_line_length": 79, "num_lines": 381, "path": "/broadcastnavigationdecoder/satellitePVTHealthEvaluation.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom decimal import *\nfrom math import *\nimport sys\nfrom kpi.data.svNavPVT import SvNavPVT\nfrom kpi.data.svHealthModel import SvHealthModel\nfrom kpi.data.serviceEnum import Service\nfrom kpi.helper.common import Common\nfrom kpi.helper.kpiConstants import KpiConstants\n\n\nclass SatellitePVTHealthEvaluation:\n \"\"\"\n class for the broadcast navigation message decoder module. containts the\n functions for the satellite pvt & health evaluation.\n \"\"\"\n\n log = Common.getLogger('SatellitePVTHealthEvaluation')\n\n def satellitePVTHealthEvaluation(self, NAV_INAV_datasets,\n NAV_FNAV_datasets, service, PRN_list,\n time_tags_gps, aod_max):\n \"\"\"\n calculates the health, position and velocity for a satellite per epoch\n parameter:\n NAV_INAV_datasets [String, [navDataModel]]: clean datasets for inav\n per satellite\n NAV_INAV_datasets [String, [navDataModel]]: clean datasets for inav\n per satellite\n service [string]: list of services.\n PRN_list [string]: list of prn ids\n time_tags_gps [timeTagGpsModel]: gps time tags\n apodMax string: max age of data rate\n returns lists for SV_HEALTH_LIST and SV_NAV_PVT_LIST\n both lists contains data per service foreach satellite in\n a specific epoch.\n So: SV_NAV_PVT_LIST[(epoch,\n [(prn,\n [(service, [navdata])]\n )]\n )]\n 1. select the to the epoch recently received nav message with\n actual age data.\n 2. 
evaluates the health of a satellite for each service\n 3. calculates the position, velocity and clock bias. If satellite\n health rate is unhealthy, the position, velocity is set to none\n \"\"\"\n self.log.info(\"Start PVT Health Evalutation...\")\n SV_HEALTH_LIST = []\n SV_NAV_PVT_LIST = []\n for epoch in time_tags_gps:\n SV_HEALTH_PER_PRN = []\n SV_NAV_PVT_PER_PRN = []\n for prnId in PRN_list:\n inavSet, fnavSet = self.dataSetSelection(epoch, list(\n filter(\n lambda inav:\n inav[\n 0] == prnId,\n NAV_INAV_datasets)),\n list(filter(\n lambda fnav:\n fnav[\n 0] == prnId,\n NAV_FNAV_datasets)),\n aod_max)\n # TODO wait for answer. should this be \"and\" or \"or\"?\n if (inavSet and fnavSet) is not None:\n SV_HEALTH = self.healthEvaluation(inavSet, fnavSet,\n prnId, epoch,\n service)\n SV_NAV_PVT = self.posVelHealthEval(inavSet, fnavSet,\n prnId, epoch,\n SV_HEALTH,\n service)\n SV_HEALTH_PER_PRN.append((prnId, SV_HEALTH))\n SV_NAV_PVT_PER_PRN.append((prnId, SV_NAV_PVT))\n else:\n self.log.warning(\"No data selected for \", prnId,\n \" at epoch: \", epoch, \" .\")\n SV_HEALTH_LIST.append((epoch, SV_HEALTH_PER_PRN))\n SV_NAV_PVT_LIST.append((epoch, SV_NAV_PVT_PER_PRN))\n self.log.info(\"End PVT Health Evalutation.\")\n return SV_HEALTH_LIST, SV_NAV_PVT_LIST\n\n def posVelHealthEval(self, inavSet, fnavSet, prnId, epoch,\n SV_HEALTH_LIST, service):\n \"\"\"\n calculates the SVNavPVT data for a prnId and an epoch.\n inavSet navDataModel: inavSet selected by dataSetSelection\n fnavSet navDataModel: FnavSet selected by dataSetSelection\n prnId string: id for Satellite\n epoch timeTagGpsModel: gps time tag for epoch\n SV_HEALTH_LIST [service, svHealthModul]: list of health per service\n return [(serviceEnum, SvNavPVT)]: returns SvNavPVT per service\n \"\"\"\n svNavPVTPerService = []\n svINavPVT = SvNavPVT()\n svINavPVT.PRN_ID = prnId\n svFNavPVT = SvNavPVT()\n svFNavPVT.PRN_ID = prnId\n self.log.info(\"Calculate position velocity...\")\n self.calculatePosition(inavSet, epoch, svINavPVT)\n self.calculatePosition(fnavSet, epoch, svFNavPVT)\n self.log.info(\"Position velocity are calculated.\")\n self.log.info(\"Calculate clock bias...\")\n cbInavDouble, cbE1, cbE5b = self.calculateClockBias(\n inavSet, epoch, KpiConstants.fE5b, True)\n cbFnavDouble, cbE5a = self.calculateClockBias(\n fnavSet, epoch, KpiConstants.fE5a, False)\n self.log.info(\"Clock bias evaluated.\")\n # TODO do at the end\n for svHealh in SV_HEALTH_LIST:\n if svHealh[1].SIS_HS == \"1\":\n svNavPVTPerService(([0], SvNavPVT(\n prnId, None, None, None, None, None, None,\n None)))\n else:\n if Service.E1.value in service and svHealh[0] == Service.E1:\n svINavPVT.Clk_apc_ref = cbE1\n svNavPVTPerService.append((Service.E1, svINavPVT))\n elif (Service.E5b.value in service and svHealh[0] ==\n Service.E5b):\n svINavPVT.Clk_apc_ref = cbE5b\n svINavPVT.Clk_apc_ref = cbE1\n svNavPVTPerService.append((Service.E5b, svINavPVT))\n elif (Service.E1E5b.value in service and svHealh[0] ==\n Service.E1E5b):\n svINavPVT.Clk_apc_ref = cbInavDouble\n svINavPVT.Clk_apc_ref = cbE1\n svNavPVTPerService.append((Service.E1E5b, svINavPVT))\n elif (Service.E5a.value in service and svHealh[0] ==\n Service.E5a):\n svFNavPVT.Clk_apc_ref = cbE5a\n svFNavPVT.Clk_apc_ref = cbE1\n svNavPVTPerService.append((Service.E5a, svFNavPVT))\n elif (Service.E1E5a.value in service and svHealh[0] ==\n Service.E1E5a):\n svFNavPVT.Clk_apc_ref = cbFnavDouble\n svFNavPVT.Clk_apc_ref = cbE1\n svNavPVTPerService.append((Service.E1E5a, svFNavPVT))\n return svNavPVTPerService\n\n def 
calculateClockBias(self, navSet, epoch, freqRate, isInav):\n \"\"\"\n calculates the clock bias for the services.\n Parameter:\n navSet navDataModel: navSet selected by datasetSelection per prn\n epoch timeTagGpsModel: gps time tag for epoch\n freqRate float: frequency rate of f2 service\n isInav boolean: considering the return value\n return clockbias for dual frequence and when inav for e1 and e5b\n when fnav only for e5a\n \"\"\"\n t = float(epoch[1])\n # TODO, is TOE right here? wait for answer of dlr\n t0c = float(navSet.TOE)\n af0 = float(navSet.SV_CLOCK_BIAS)\n af1 = float(navSet.SV_CLOCK_DRIFT)\n af2 = float(navSet.SV_CLOCK_DRIFT_RATE)\n tsv = af0 + af1 * (t - t0c) + af2 * (t - t0c) ** 2\n TR1 = float(navSet.BGD_E5_A)\n TR2 = float(navSet.BGD_E5_B)\n BGD = (TR1 - TR2) / (1 - (KpiConstants.fE1 / freqRate)**2)\n tsvE1 = tsv - BGD\n tsvE2 = tsv - (KpiConstants.fE1 / freqRate)**2 * BGD\n self.log.debug(\"Clockbias for double Frequencies: \" + str(tsv))\n if isInav:\n self.log.debug(\"Clockbias for INAV: E1:\" +\n str(tsvE1) + \"E1E5b: \" + str(tsvE2))\n return tsv, tsvE1, tsvE2\n else:\n self.log.debug(\"Clockbias for E1E5a: \" + str(tsvE2))\n return tsv, tsvE2\n\n def calculatePosition(self, navSet, epoch, navPVT):\n \"\"\"\n calculates the position and velocity for a navSet from prn per epoch.\n parameter:\n navSet navDataModel: navSet selected by datasetSelection per prn\n epoch timeTagGpsModel: gps time tag for epoch\n navPVT SvNavPVT: variable for nav position and velocity.\n \"\"\"\n # calculate position. for test only compare to tum1895 file.\n # TODO for real tests wait for dlr data\n t = float(epoch[1])\n e = float(navSet.ECCENTRICITY)\n A = float(navSet.SQRT_A) ** 2\n n0 = sqrt(KpiConstants.ggC / A**3)\n tk = t - float(navSet.TOE)\n n = n0 - float(navSet.DELTA_N)\n M = float(navSet.M_0) + n * tk\n\n # TODO, is this E calculation correct? 
wait for answer\n E = (M + e * sin(M) + e**2 * sin(M) *\n cos(M) + 1 / 2 * e ** 3 *\n sin(M) * (3 * cos(M) ** 2 - 1))\n self.log.debug(\"E: \" + str(E))\n v1 = sqrt(1 - e**2) * sin(E) / (1 - e * cos(E))\n v2 = (cos(E) - e) / (1 - e * cos(E))\n v = atan(v1 / v2)\n latitude = v + float(navSet.OMEGA)\n latCorrection = float(navSet.CUS) * sin(2 * latitude) + \\\n float(navSet.CUC) * cos(2 * latitude)\n radCorrection = float(navSet.CRS) * sin(2 * latitude) + \\\n float(navSet.CRC) * cos(2 * latitude)\n inclCorrection = float(navSet.CIS) * sin(2 * latitude) + \\\n float(navSet.CIC) * cos(2 * latitude)\n u = latitude + latCorrection\n r = A * (1 - e * cos(E)) + radCorrection\n i = float(navSet.I_0) + inclCorrection + float(navSet.IDOT) * tk\n x1 = r * cos(u)\n y1 = r * sin(u)\n corLongitude = float(navSet.OMEGA_0) + \\\n (float(navSet.OMEGA_DOT) - KpiConstants.OMEGAe) * \\\n tk - KpiConstants.OMEGAe * float(navSet.TOE)\n x = x1 * cos(corLongitude) - y1 * \\\n cos(i) * sin(corLongitude)\n y = x1 * sin(corLongitude) + y1 * \\\n cos(i) * cos(corLongitude)\n z = y1 * sin(i)\n self.log.debug(\"X_nav: \" + str(x) + \"Y_nav: \" + str(y) +\n \"Z_nav: \" + str(z))\n navPVT.X_nav = x\n navPVT.Y_nav = y\n navPVT.Z_nav = z\n\n # calculate velocity\n # TODO cant test until testdata is delivered from dlr\n eDot = n / (1 - e * cos(E))\n vDot1 = (sin(E) * eDot * (1 + e * cos(v)))\n vDot2 = (sin(v) * (1 + -e * cos(E)))\n vDot = vDot1 / vDot2\n uDot = vDot + 2 * (float(navSet.CUS) * cos(2 * u) - float(navSet.CUC) *\n sin(2 * u)) * vDot\n rDot = (A * e * sin(E) * n / (1 - e * cos(E))) + \\\n 2 * (float(navSet.CRS) *\n cos(2 * u) - float(navSet.CIC) * sin(2 * u)) * vDot\n\n iDot1 = float(navSet.IDOT) + (float(navSet.CIS) * cos(2 * u) -\n float(navSet.CIC) * sin(2 * u)) * \\\n 2 * vDot\n wDot = float(navSet.OMEGA_DOT) - KpiConstants.OMEGAe\n xDot1 = rDot * cos(u) - y1 * uDot\n yDot1 = rDot * sin(u) - x1 * uDot\n\n vx = (xDot1 - y1 * cos(i) * wDot) * cos(corLongitude) - \\\n (x1 * wDot + yDot1 * cos(i) - y1 * sin(i) * iDot1) * \\\n sin(corLongitude)\n vy = (xDot1 - y1 * cos(i) * wDot) * sin(corLongitude) + \\\n (x1 * wDot + yDot1 * cos(i) - y1 * sin(i) * iDot1) * \\\n cos(corLongitude)\n vz = yDot1 * sin(i) + y1 * cos(i) * iDot1\n self.log.debug(\"VX_nav: \" + str(vx) + \"VY_nav: \" + str(vy) +\n \"VZ_nav: \" + str(vz))\n navPVT.Vx_nav = vx\n navPVT.Vy_nav = vy\n navPVT.Vz_nav = vz\n\n def healthEvaluation(self, inavSet, fnavSet, prnId, epoch, service):\n \"\"\"\n calculates the health evaluation for a satellite in an epoch.\n SIS HS is healthy (\"0\"), when the service specific SHS, DVS\n variables are healthy and when the SISA not NAPA (Sisa = 255) is\n parameter:\n inavset navDataModel: dataset for inav\n fnavset navDataModel: dataset for fnav\n prnId String: id of satellite\n epoch timeTagGpsModel: actual topic for health evaluation\n return [(ServiceEnum, [SvHealthModel])]: List of SVHealth per service\n \"\"\"\n healtEvaluationPerService = []\n self.log.info(\"Start health evaluation.\")\n DVSe1, SHSe1, DVSe5a, SHSe5a, DVSe5b, SHSe5b = self.getBinHelth(\n Common.convertDecimalToBinary(inavSet.SV_HEALTH, 9),\n Common.convertDecimalToBinary(fnavSet.SV_HEALTH, 9))\n # get the sisa indices. 
When SISA VALUE = 255 then is no accuracy\n # prediction avaible (napa)\n sisaE1E5a = \"0\" if Decimal(fnavSet.SISA_SIGNAL) != 255 else \"1\"\n sisaE1E5b = \"0\" if Decimal(inavSet.SISA_SIGNAL) != 255 else \"1\"\n\n # healthevaluation for E1\n if Service.E1.value in service:\n SISe1 = \"0\" if (SHSe1 == \"00\" and DVSe1 == \"0\" and\n sisaE1E5b == \"0\") else \"1\"\n healtEvaluationPerService.append((Service.E1, SvHealthModel(\n prnId, epoch, SISe1, SHSe1, DVSe1,\n \"not NAPA\" if SISe1 == \"0\" else inavSet.SISA_SIGNAL)))\n\n # healthevaluation for E5a\n if Service.E5a.value in service:\n SISe5a = \"0\" if (SHSe5a == \"00\" and DVSe5a == \"0\" and\n sisaE1E5a == \"0\") else \"1\"\n healtEvaluationPerService.append((Service.E5a, SvHealthModel(\n prnId, epoch, SISe5a, SHSe5a, DVSe5a,\n \"not NAPA\" if SISe5a == \"0\" else fnavSet.SISA_SIGNAL)))\n\n # healthevaluation for E5b\n if Service.E5b.value in service:\n SISe5b = \"0\" if (SHSe5b == \"00\" and DVSe5b == \"0\" and\n sisaE1E5b == \"0\") else \"1\"\n healtEvaluationPerService.append((Service.E5b, SvHealthModel(\n prnId, epoch, SISe5b, SHSe5b, DVSe5b,\n \"not NAPA\" if SISe5b == \"0\" else inavSet.SISA_SIGNAL)))\n\n # healthevaluation for E1,E5a\n if Service.E1E5a.value in service:\n SISe1e5a = \"0\" if (SHSe1 == \"00\" and DVSe1 == \"0\" and\n SHSe5a == \"00\" and DVSe5a == \"0\" and\n sisaE1E5a == \"0\") else \"1\"\n healtEvaluationPerService.append((Service.E1E5a, SvHealthModel(\n prnId, epoch, SISe1e5a, SHSe1 + SHSe5a, DVSe1 + DVSe5a,\n \"not NAPA\" if SISe1e5a == \"0\" else fnavSet.SISA_SIGNAL)))\n\n # healthevaluation for E1,E5b\n if Service.E1E5b.value in service:\n SISe1e5b = \"0\" if (SHSe1 == \"00\" and DVSe1 == \"0\" and\n SHSe5b == \"00\" and DVSe5b == \"0\" and\n sisaE1E5b == \"0\") else \"1\"\n healtEvaluationPerService.append((Service.E1E5b, SvHealthModel(\n prnId, epoch, SISe1e5b, SHSe1 + SHSe5b, DVSe1 + DVSe5b,\n \"not NAPA\" if SISe1e5b == \"0\" else inavSet.SISA_SIGNAL)))\n\n self.log.info(\"End health evaluation.\")\n return healtEvaluationPerService\n\n def getBinHelth(self, binHealthInav, binHealthFnav):\n return (binHealthInav[0:1], binHealthInav[1:3], binHealthFnav[3:4],\n binHealthFnav[4:6], binHealthInav[6:7], binHealthInav[7:9])\n\n def dataSetSelection(self, timeTagGps, NAV_INAV_datasets,\n NAV_FNAV_datasets, aod_max):\n \"\"\"\n calculates the most recently transmitted message\n parameter:\n timeTagGps (string, string): actual timetag\n NAV_INAV_datasets [(String, [navDataModel])]: list of tupel with\n (prnId, [navDataModel] ). contains dataset with inav data\n NAV_FNAV_datasets [(String, [navDataModel])]: list of tupel with\n (prnId, [navDataModel] ). contains dataset with fnav data\n e.g. 
[(\"E08\", [navDataModel, navDataModel])]\n aod_max string: the maximal age of data\n return (NavDataModel, NavDataModel): the computed datamodel\n for the satellitePVTHealthEvaluation for inav and fnav\n \"\"\"\n WN = float(timeTagGps[0])\n t = float(timeTagGps[1])\n computedInav = None\n computedFnav = None\n recentlyTransmittedDeltaTTOMInav = None\n recentlyTransmittedDeltaTTOMFnav = None\n # compute inav dataset\n for navDataTupel in NAV_INAV_datasets:\n for navData in navDataTupel[1]:\n galWeek = float(navData.GAL_WEEK)\n toe = float(navData.TOE)\n ttom = float(navData.TTOM)\n deltaTTOM = t + 604800 * WN - (ttom + 604800 * galWeek)\n if (recentlyTransmittedDeltaTTOMInav is None or\n recentlyTransmittedDeltaTTOMInav >= deltaTTOM):\n aod = t + 604800 * WN - (toe + 604800 * galWeek)\n # TODO check if 0 < or <=.\n if 0 <= aod and aod <= aod_max * 3600:\n recentlyTransmittedDeltaTTOMInav = deltaTTOM\n computedInav = navData\n # compute fnav dataset\n for navDataTupel in NAV_FNAV_datasets:\n for navData in navDataTupel[1]:\n galWeek = float(navData.GAL_WEEK)\n toe = float(navData.TOE)\n ttom = float(navData.TTOM)\n deltaTTOM = t + 604800 * WN - (ttom + 604800 * galWeek)\n if (recentlyTransmittedDeltaTTOMFnav is None or\n recentlyTransmittedDeltaTTOMFnav >= deltaTTOM):\n aod = t + 604800 * WN - (toe + 604800 * galWeek)\n # TODO check if 0 < or <=.\n if 0 <= aod and aod <= aod_max * 3600:\n recentlyTransmittedDeltaTTOMFnav = deltaTTOM\n computedFnav = navData\n return computedInav, computedFnav\n" }, { "alpha_fraction": 0.611554741859436, "alphanum_fraction": 0.6387975811958313, "avg_line_length": 40.74509811401367, "blob_id": "845f6b69b78442af4b6f9de031d4b43baf0abd74", "content_id": "5e84fc35f657a66e2d045eb0a6f6f5e20ff631c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2129, "license_type": "no_license", "max_line_length": 77, "num_lines": 51, "path": "/data/configFileModel.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass ConfigFileModel:\n \"\"\"\n config file object. 
inherit all the variables extracted from config file.\n Use for variable Parser.\n Parameter:\n LOG_FILE string: name of logfile\n START_EPOCH string: timeTag begin of Epoch Intervall\n END_EPOCH string: timeTag end of Epoch Intervall\n STEP string: sampling intervall in seconds\n LEAP_SEC [string]: list of announced leap seconds to date\n AOD_MAX string: max value of age of data (hours)\n GAL_BRC string: Galileo broadcast navidaton data name\n GAL_REF string: Galileo precise orbits data repo name\n APC_OFF_REF string: Antenne phase center offests file\n APC_OFF_BCE string: Antenne phase center offests file\n DCB_REF string: differential code bias file\n INP_FLD string: storage folder path for input files\n OUT_FLD string: storage folder path for output files\n PRN_ID string: satellites PRN to be included in the computation\n SERVICE string: desired service for computing kpis\n STATION_LIST [string]: list of stations\n \"\"\"\n\n def __init__(self, logFile=\"LogFile.log\", startEpoch=\"2016 05 01 0 0 0\",\n endEpoch=\"2016 05 02 0 0 0\", step=\"300\",\n leapSec=\"\", aodMax=\"3\", galBrc=\"brdm\", galRef=\"tum\",\n apcOffRef=\"igs08_1887.atx\", apcOffBce=\"gps_brdc_1771.atx\",\n dcbRef=\"dcb:file.bsx\", inpField=\"{absolute path}/Input\",\n outField=\"{absolute path}/Input\",\n prnId=\"E08,E09,E11,E12,E19,E22,E24,E26,E30\",\n service=\"E1,E5a,E5b\", stationList=\"kour\"):\n self.LOG_FILE = logFile\n self.START_EPOCH = startEpoch\n self.END_EPOCH = endEpoch\n self.STEP = step\n self.LEAP_SEC = leapSec\n self.AOD_MAX = aodMax\n self.GAL_BRC = galBrc\n self.GAL_REF = galRef\n self.APC_OFF_REF = apcOffRef\n self.APC_OFF_BCE = apcOffBce\n self.DCB_REF = dcbRef\n self.INP_FLD = inpField\n self.OUT_FLD = outField\n self.PRN_ID = prnId\n self.SERVICE = service\n self.STATION_LIST = stationList\n" }, { "alpha_fraction": 0.5789473652839661, "alphanum_fraction": 0.589712917804718, "avg_line_length": 22.22222137451172, "blob_id": "34271f3cb76fbaccc0718208bb8ad8db32bb31ad", "content_id": "8aea70f287eaa3a5336aa4442eceb500df1c695a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 836, "license_type": "no_license", "max_line_length": 69, "num_lines": 36, "path": "/sisre/svephemerisclockerror.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author: Thomas Schneider\n# Date : 13.09.2016\n\nimport sys\nfrom kpi.helper.common import Common\nfrom kpi.sisre.svattitude import SVAttitude\nfrom kpi.data.svRefPVTCoM import SvRefPVTCoM\n\n\nclass SVEphemerisClockError:\n \"\"\"\n computes SV Ephemeris and Clock error per epoch and satellite\n \"\"\"\n\n def __init__(self, date):\n self.date = date\n\n def computeEphemerisError(self, pvt, U):\n \"\"\"\n computes Ephemeris error\n pvt: SvRefPVTCoM\n U: roation matrice given by SVAttitude\n \"\"\"\n def computeClockError(self, pvt, U):\n \"\"\"\n computes Ephemeris error\n pvt: SvRefPVTCoM\n U: roation matrice given by SVAttitude\n \"\"\"\n \"\"\"\n \"\"\"\n\n def __str__(self):\n return __name__ + \"(\" + self.date + \")\"\n" }, { "alpha_fraction": 0.6349999904632568, "alphanum_fraction": 0.6462500095367432, "avg_line_length": 31, "blob_id": "4fa8e812710a906aa55983a58640018e4e433766", "content_id": "69a2a7e7f22e23ad8f4609e25da2e46877e388f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 800, "license_type": "no_license", "max_line_length": 71, "num_lines": 25, "path": 
"/data/stationObservationsData.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass StationObservationsData:\n \"\"\"\n Author: Bhaskar Shankar (IDV-AG)\n Date : 05 Sept 2016\n\n MODULE: 5.6 Observation files decoder module\n\n Parameters:\n OBSERVABLES_FILE_NAME - File name of Observable file\n OBSERVABLES_HEADER - Header information of observable file\n OBSERVATION_RECORDS - Observation record containing PRNID and\n record data at every timetagcal Date\n\n \"\"\"\n\n def __init__(self, observablesFileName=\"\", observablesHeader=\"\",\n observationRecords=\"\"):\n self.OBSERVABLES_FILE_NAME = observablesFileName\n self.OBSERVABLES_HEADER = observablesHeader\n # Class Ref: observationRecords\n self.OBSERVATION_RECORDS = observationRecords\n" }, { "alpha_fraction": 0.5683019161224365, "alphanum_fraction": 0.5690566301345825, "avg_line_length": 31.317073822021484, "blob_id": "ba7513d31fd9260cda74bef4abc4556e3e51778b", "content_id": "55397ccdef782952eecaebae5dd9d1ba82bf48c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1327, "license_type": "no_license", "max_line_length": 74, "num_lines": 41, "path": "/data/svNavPVT.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass SvNavPVT:\n \"\"\"\n Position data in ECEF format, based on broadcast ephemeris data. It is\n expressed to SV’s Antenna Phase Center NAV in meters.\n PRN_ID string: satellite id\n X_nav float: X_nav variable in meters\n Y_nav float: Y_nav variable in meters\n Z_nav float: Z_nav variable in meters\n Vx_nav float: vX_nav variable in meters/seconds\n Vy_nav float: vY_nav variable in meters/seconds\n Vz_nav float: vZ_nav variable in meters/seconds\n Clk_apc_ref float: clkNav variable in seconds\n \"\"\"\n\n def __init__(self, prnId=\"\", xNav=\"\", yNav=\"\", zNav=\"\",\n vxNav=\"\", vYNav=\"\", vZNav=\"\", clkNav=\"\"):\n self.PRN_ID = prnId\n self.X_nav = xNav\n self.Y_nav = yNav\n self.Z_nav = zNav\n self.Vx_nav = vxNav\n self.Vy_nav = vYNav\n self.Vz_nav = vZNav\n self.Clk_apc_ref = clkNav\n\n def printMe(self):\n \"\"\"\n prints all the values of svNavPVT\n \"\"\"\n print(\"PRN Id: \", self.PRN_ID)\n print(\"XNav: \", self.X_nav)\n print(\"YNav: \", self.Y_nav)\n print(\"ZNav: \", self.Z_nav)\n print(\"VxNav: \", self.Vx_nav)\n print(\"VyNav: \", self.Vy_nav)\n print(\"VzNav: \", self.Vz_nav)\n print(\"Clk_apc_ref: \", self.Clk_apc_ref)\n" }, { "alpha_fraction": 0.583048403263092, "alphanum_fraction": 0.5924501419067383, "avg_line_length": 40.78571319580078, "blob_id": "cda5d1ce70bf5b11ed72da014efed2d2fdf6191b", "content_id": "ecbbae8e9647127cf40ee0689d9027dcb62a7927", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7020, "license_type": "no_license", "max_line_length": 100, "num_lines": 168, "path": "/broadcastnavigationdecoder/broadcastNavigationMessageDecoder.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom math import *\nfrom decimal import *\nfrom enum import Enum\nfrom kpi.broadcastnavigationdecoder.rinex3NAVParser import BRDMParser\nfrom kpi.broadcastnavigationdecoder.satellitePVTHealthEvaluation import SatellitePVTHealthEvaluation\nfrom kpi.helper.common import Common\n\n\nclass Dataset(Enum):\n \"\"\"\n enum for the datasets.\n INAV if datasource is 100xxxx01, 001xxxx01 or 101xxxx01\n FNAV if 
datasource is 010xxxx10\n NONE if nothing fits\n \"\"\"\n INAV = \"INAV\"\n FNAV = \"FNAV\"\n NONE = \"NONE\"\n\n\nclass BroadcastNavigationMessageDecoder:\n \"\"\"\n reads the bce file in text format\n and converts to internal parameters\n \"\"\"\n log = Common.getLogger('BroadCastNavigationMessageDecoder')\n\n def broadcastNavigationMessageDecoder(self, bceList, service, prn_ID,\n time_tags_gps, aod_max):\n \"\"\"\n this module is in charge of evaluating satelite health, position,\n velocity and clock bias rate based on broadcast epemeris.\n it's parsing the rinex3 files and handle the received rinex data.\n the module connsits of four smaller modules.\n parameter:\n bceList [string]: name of bce files\n service [string]: list of services\n prnId [string]: list of satellites\n time_tags_gps [timeTagGpsModel]: gps time tags\n aodMax string: max age of data rate\n return: time_transfer_data [TIME_SYSTEM_CORRECTION],\n SV_Health, SV_NAV_POS_VEL both lists contains data per\n service foreach satellite in a specific epoch.\n 1. rinex parser: parse the rinex file\n 2. time transfer evaluation: extracts the time system correction\n 3. navdata cleaning: seperates each navigation message to INAV and FNAV\n 4. Satellite PVT & Health Evaluation: calculates position, velocity and\n health evaluation.\n \"\"\"\n self.log.info(\"Start broadcast navigation message decoder...\")\n NAV_headers, NAV_datasets = BRDMParser().rinex3navParser(bceList)\n time_transfer_data = self.timeTransferEvaluation(NAV_headers)\n Inav, Fnav = self.navDataCleaning(NAV_datasets)\n SV_HEALTH, SV_NAV_POS_VEL = SatellitePVTHealthEvaluation().\\\n satellitePVTHealthEvaluation(\n Inav, Fnav, service, prn_ID,\n time_tags_gps, aod_max)\n self.log.info(\"End broadcast navigation message decoder.\")\n self.log.info(\"Transfer data...\")\n return time_transfer_data, SV_HEALTH, SV_NAV_POS_VEL\n\n def timeTransferEvaluation(self, NAV_headers):\n \"\"\"\n extracts the time system correction variables from nav header and\n return time transfer evaluation as time transfer data\n parameter:\n Nav_headers [navHeaderModel]: nav header data\n return time_transfer_data [TIME_SYSTEM_CORRECTION]\n \"\"\"\n self.log.info(\"Start time Transfer Evaluation...\")\n time_transfer_data = []\n for navHeader in NAV_headers:\n for timeSysCorrection in navHeader.TIME_SYSTEM_CORRECTION:\n time_transfer_data.append(timeSysCorrection)\n self.log.info(\"End time Transfer Evaluation.\")\n return time_transfer_data\n\n def navDataCleaning(self, NAV_datasets):\n \"\"\"\n seperates the nav_data messages to I-NAV and F.NAV messages.\n Check the consistency of the NAV_Datasets.\n parameter:\n nav_datasets [navDataModel]: list contains all the navDataSets\n from brdm file\n return NAV_INAV_datasets [prnId, [navDataModel]] and\n NAV_FNAV_datasets [prnId, [navDataModel]]. 
NAV_Datasets splitted\n into INAV and FNAV.\n \"\"\"\n self.log.info(\"Start Nav Data Cleaning...\")\n NAV_INAV_datasets = []\n NAV_FNAV_datasets = []\n oldPRNId = \"\"\n inavData = []\n fnavData = []\n for navData in NAV_datasets:\n if len(oldPRNId) > 1 and int(\n navData.PRN_ID[1:]) is not int(oldPRNId[1:]):\n NAV_INAV_datasets.append((oldPRNId, inavData))\n NAV_FNAV_datasets.append((oldPRNId, fnavData))\n # reset i-nav/ f-nav temp data\n inavData = []\n fnavData = []\n if self.isTimeConsistence(navData.TTOM, navData.TOE):\n dataSet = self.getDataset(navData.DATA_SOURCES)\n if self.isSISAConsistence(navData.SISA_SIGNAL):\n if Dataset.INAV == dataSet:\n inavData.append(navData)\n elif Dataset.FNAV == dataSet:\n fnavData.append(navData)\n else:\n self.log.warning(\"NavData is neither INAV nor FNAV.\" +\n \"Disregard NAV_Dataset: PRN: %s,\" +\n \" Epoch: %s\",\n navData.PRN_ID, navData.EPOCH)\n else:\n self.log.warning(\"NavData signal is inconsitent.\" +\n \"Disregard NAV_Dataset: PRN: %s,\" +\n \" Epoch: %s\",\n navData.PRN_ID, navData.EPOCH)\n else:\n self.log.warning(\"NavData ttoe and ttom are inconsitent.\" +\n \"Disregard NAV_Dataset: PRN: %s, Epoch: %s\",\n navData.PRN_ID, navData.EPOCH)\n oldPRNId = navData.PRN_ID\n NAV_INAV_datasets.append((oldPRNId, inavData))\n NAV_FNAV_datasets.append((oldPRNId, fnavData))\n return NAV_INAV_datasets, NAV_FNAV_datasets\n\n def getDataset(self, datasource):\n \"\"\"\n checks the datasource which dataset it's representing\n datasource decimal: datasource\n return Dataset: returns dataset of INAV, FNAV or none\n \"\"\"\n binValue = Common.convertDecimalToBinary(datasource, 10)\n binComparable = binValue[0:3] + binValue[8:10]\n if (binComparable == (\"10001\" or \"00101\" or \"10101\")):\n return Dataset.INAV\n elif binComparable == \"01010\":\n return Dataset.FNAV\n return Dataset.NONE\n\n def isTimeConsistence(self, ttom, toe):\n \"\"\"\n check if nav_data is consistence, by checking ttom and toe\n parameter:\n ttom String: transmission time of message sec of GAL week\n toe String: toe time of ephemeris sec of GAL week\n return true if ttom is >= toe\n \"\"\"\n if Decimal(ttom) >= Decimal(toe):\n return True\n else:\n return False\n\n def isSISAConsistence(self, sisa):\n \"\"\"\n check if sisa is provided in index format and not in metric\n parameter:\n sisa String: SISA Value\n return true if sisa is between 0 and 255\n \"\"\"\n if Decimal(sisa) >= 0 and Decimal(sisa) < 256:\n return True\n else:\n return False\n" }, { "alpha_fraction": 0.6007215976715088, "alphanum_fraction": 0.604630172252655, "avg_line_length": 40.061729431152344, "blob_id": "67f43e59e3ba62ba1103a2bfb388b8fdff75619c", "content_id": "7c8db24c00c88bb2c18bafb260660d7a6de64029", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3327, "license_type": "no_license", "max_line_length": 95, "num_lines": 81, "path": "/sisre/sisremain.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author: Thomas Schneider\n# Date : 18.08.2016\n\nfrom kpi.helper.common import Common\nfrom kpi.sisre.svattitude import SVAttitude\nfrom kpi.sisre.svephemerisclockerror import SVEphemerisClockError\nfrom kpi.sisre.sisrestatistics import SISREStatistics\nfrom kpi.antennaOffsetFilesDecoderModule.antennaOffsetParser import AntennaOffsetParser\nfrom kpi.configParser.configFileParser import ConfigFileParser, ConfigFileModel\n\n\nclass SISREMain:\n \"\"\"\n computes sv-attitude, ephemeris and 
clock error and \n sisre statistics per epoch and satellite\n \"\"\"\n log = None\n\n def __init__(self):\n Common.printSystemInfo()\n self.log = Common.getLogger(__name__)\n\n def calculateSVAttitudes(self, start, end, gals, epoch_hour):\n \"\"\" \n deprecated: calculates SV Attitude for all given satellites \n start: calendar date\n end : calendar date\n gals : satellite data (x, y, z, vx, vy, vz)\n epoch: steps in milliseconds between start and end \n \"\"\"\n print(\"=== Satellite Calculation Progress: ===\")\n for i in range(1, epoch_hour):\n Common.printProgress(\n i, epoch_hour, \"calculating epoch: \" + str(i))\n sv = SVAttitude(*start, i)\n sv.computeRotationAndSpeedMatrices(gals)\n if sv.reached(end):\n break\n print(\"S and U Matrices calculated for\", len(gals),\n \" Satellites and\", epoch_hour, \" time steps\", \"( see logfile 'kpi.log')\")\n\n def calculateSatellites(self, svRefPVTCoM, dateTags):\n \"\"\" \n calculates SISRE Statistics for all given satellites\n svRefPVTCoM: galileo satellite data (x, y, z, vx, vy, vz, CoM)\n dateTags : dateTags for all rows of svRefPVTCoM\n \"\"\"\n # TODO: mit Maxi klären, was in svRefPVTCoM und dateTags steht\n i = 0\n print(\"=== Satellite Calculation Progress: ===\")\n for d in dateTags:\n # convert the date string having a float on last position to a list\n # of integers.\n date = [float(x) for x in d.split()]\n date = [int(x) for x in date]\n\n Common.printProgress(\n i, len(dateTags), \"calculating dateTag: \" + str(date))\n\n # prepare calculation\n sv = SVAttitude(*date, 0)\n confParser = ConfigFileParser()\n kpi = confParser.kpiCfg()\n apcParser = AntennaOffsetParser()\n apc_nav, apc_pro = apcParser.antennaOffsetParser(\n confParser.inputPath + kpi.APC_OFF_REF, confParser.inputPath + kpi.APC_OFF_BCE)\n ephClkError = SVEphemerisClockError(date)\n stats = SISREStatistics(date)\n # do calculation per satellite\n for pvt in svRefPVTCoM:\n U, S = sv.computeRotationAndSpeedMatrice(pvt)\n vecErr = ephClkError.computeEphemerisError(pvt, U)\n clkErr = ephClkError.computeClockError(pvt, U)\n sisre = stats.computeSISRE(vecErr, clkErr, S)\n self.log.info(\"SISRE:\" + sisre)\n i += 1\n\n print(\"SISRE Statistics calculated for\", len(svRefPVTCoM),\n \" Satellites and\", len(dateTags), \" time steps (see logfile 'kpi.log')\")\n" }, { "alpha_fraction": 0.543083906173706, "alphanum_fraction": 0.6476757526397705, "avg_line_length": 43.658226013183594, "blob_id": "11891d66e74933f73b4c93d14dbaee72498ac9c2", "content_id": "3e10277f14b342487e37c926767a1fbf1b9f8bd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3528, "license_type": "no_license", "max_line_length": 80, "num_lines": 79, "path": "/test/rinex3NAVParserTest.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "import unittest\nfrom nose.tools import *\nfrom decimal import *\nfrom kpi.broadcastnavigationdecoder.rinex3NAVParser import BRDMParser\n\n\nclass TestRinex3NAVParser(unittest.TestCase):\n\n def setUp(self):\n self.bceList = [\n \"test/resources/broadcastnavigationdecoder/testbrdm1220.16p\"]\n\n def testRinex3navParserSuccess(self):\n navHeader, NAV_datasets = BRDMParser().rinex3navParser(\n self.bceList)\n assert navHeader[0].TIME_SYSTEM_CORRECTION[\n 0].A_0 == '-1.8626451492e-09'\n assert navHeader[0].TIME_SYSTEM_CORRECTION[\n 0].A_1 == '2.664535259e-15'\n assert navHeader[0].TIME_SYSTEM_CORRECTION[\n 3].A_0 == '-6.1118043959e-10'\n assert navHeader[0].TIME_SYSTEM_CORRECTION[\n 3].A_1 == '-5.773159728e-15'\n 
assert navHeader[0].TIME_SYSTEM_CORRECTION[3].TSC_S == \"EGNOS\"\n assert navHeader[0].TIME_SYSTEM_CORRECTION[0].TSC_S == \"SNN\"\n assert navHeader[0].TIME_SYSTEM_CORRECTION[4].REF_TIME == \"225280\"\n assert navHeader[0].TIME_SYSTEM_CORRECTION[2].REF_WEEK_NR == \"1894\"\n assert navHeader[0].FORMAT_VERSION == \"3.02\"\n assert navHeader[0].TIME_TAG == \"20160502 012903\"\n assert len(NAV_datasets) == 1404\n assert NAV_datasets[0].TIME_TAG == \"05 01 00 00 00\"\n assert NAV_datasets[0].SV_CLOCK_BIAS == \"1.810591667891e-03\"\n assert NAV_datasets[1].TIME_TAG == \"05 01 00 10 00\"\n assert NAV_datasets[3].SV_CLOCK_DRIFT_RATE == \"0.000000000000e+00\"\n assert NAV_datasets[4].IOD_NAV == \"4.000000000000e+00\"\n assert NAV_datasets[5].ECCENTRICITY == \"2.203179756179e-04\"\n assert NAV_datasets[6].OMEGA_0 == \"-2.817293386321e-01\"\n assert NAV_datasets[7].OMEGA_DOT == \"-5.682736708794e-09\"\n assert NAV_datasets[8].IDOT == \"2.082229590363e-10\"\n assert NAV_datasets[9].GAL_WEEK == \"1.895000000000e+03\"\n assert NAV_datasets[10].SISA_SIGNAL == \"3.120000000000e+00\"\n assert NAV_datasets[11].TTOM == \"5.455000000000e+03\"\n assert NAV_datasets[12].DATA_SOURCES == 517.0000000000\n assert NAV_datasets[13].SV_HEALTH == 000.0000000000\n assert NAV_datasets[len(NAV_datasets) - 1].PRN_ID == \"E11\"\n assert NAV_datasets[\n len(NAV_datasets) - 1].TIME_TAG == \"05 01 23 40 00\"\n\n def testRinex3navParserNoGalileoData(self):\n self.bceListWrong = [\n \"test/resources/broadcastnavigationdecoder/testbrdmNoGalileo.16p\"]\n navHeader, NAV_datasets = BRDMParser().rinex3navParser(\n self.bceListWrong)\n assert len(NAV_datasets) == 0\n\n def testRinex3navParserNoEndOfHeaderTag(self):\n self.bceListWrong = [\n \"test/resources/broadcastnavigationdecoder/testbrdmWrongHeader.16p\"]\n navHeader, NAV_datasets = BRDMParser().rinex3navParser(\n self.bceListWrong)\n assert len(NAV_datasets) == 0\n\n def testRinex3navParserNoHeader(self):\n self.bceListWrong = [\n \"test/resources/broadcastnavigationdecoder/testbrdmNoHeader.16p\"]\n navHeader, NAV_datasets = BRDMParser().rinex3navParser(\n self.bceListWrong)\n assert len(NAV_datasets) == 0\n\n def testRinex3navParserNoTTom(self):\n self.bceListWrong = [\n \"test/resources/broadcastnavigationdecoder/testbrdmNoTtom.16p\"]\n navHeader, NAV_datasets = BRDMParser().rinex3navParser(\n self.bceListWrong)\n assert NAV_datasets[0].TTOM == \"0.9999e+09\"\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.6449957489967346, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 40.25882339477539, "blob_id": "81efb49cc6e870eea1134a71847e76e6862f14a5", "content_id": "25aa431f571541f73c1651890f88cf316b8e761c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3507, "license_type": "no_license", "max_line_length": 93, "num_lines": 85, "path": "/test/observationFilesDecoderTest.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport unittest\nfrom nose.tools import *\nfrom kpi.observationFilesDecoderModule.observationFilesDecoder import ObservationFilesDecoder\n\n\nclass testObservationFilesDecoder(unittest.TestCase):\n\n def setUp(self):\n \"\"\"\n setup call, runs before each test\n \"\"\"\n self.configFile = \"test/resources/Configuration.cfg\"\n self.classToTest = ObservationFilesDecoder()\n self.resourcePath = \"test/resources/observationFilesDecoderTest/\"\n self.observationFilesSingle = self.resourcePath + 
\"test_kour1220.16o\"\n self.observationFiles = [\n self.resourcePath + \"test_kour1220.16o\", self.resourcePath +\n \"test_two_kour1220.16o\"]\n self.observationFiles_single = self.resourcePath + \"test_kour1220.16o\"\n self.observationFiles_notfound = [\n self.resourcePath + \"test_kour1220_NOT.16o\"]\n self.observationList = [\"test_kour1220.16o\"]\n self.observationList_not_Found = [\"test_kour1220_NOT.16o\"]\n self.prnId = [\"E08\", \"E11\", \"E12\", \"E19\", \"E22\", \"E24\", \"E09\", \"E26\"]\n\n @raises(Exception)\n def testFileNotFoundError(self):\n self.classToTest.observationFilesDecoder(\n self.observationFiles_notfound, self.observationList_not_Found,\n self.prnId)\n assert inputDcbFile is None\n\n def testFileAndReturnCount(self):\n \"\"\"\n The return object should be one for each file\n \"\"\"\n stationObservationData = self.classToTest.observationFilesDecoder(\n self.observationFiles, self.observationList, self.prnId)\n assert len(stationObservationData) == 2\n\n def testTimeTagCalsPerFile(self):\n \"\"\"\n \"self.observationFilesSingle\" file has 2880 timetagcals -\n 24 * 60 * 2(per minute) with PRNID in given list.\n The count of timetagcals and the result number of time tag\n cals should match\n \"\"\"\n allObsRecData = self.classToTest.getObservationRecords(\n self.observationFilesSingle, self.prnId)\n assert len(allObsRecData) == 2880\n\n def testAllMandatoryHeaderData(self):\n \"\"\"\n Test for not null data in mandatory fields\n \"\"\"\n headerData = self.classToTest.extractObservationsFileHeaderData(\n self.observationFiles_single)\n assert headerData.RINEX_VERSION.strip()\n assert headerData.RINEX_TYPE.strip()\n assert headerData.SATELLITE_SYSTEM.strip()\n assert headerData.PROGRAM_CREATING_FILE.strip()\n assert headerData.FILE_CREATION_DATE.strip()\n assert headerData.MARKER_NAME.strip()\n assert headerData.MARKER_NUMBER.strip()\n assert headerData.OBSERVER_NAME.strip()\n assert headerData.AGENCY_NAME.strip()\n assert headerData.RECEIVER_NUMBER.strip()\n assert headerData.RECEIVER_TYPE.strip()\n assert headerData.RECEVIER_VERSION.strip()\n assert headerData.ANTENNA_NUMBER.strip()\n assert headerData.ANTENNA_TYPE.strip()\n assert headerData.APPROX_MARKER_POSITION.X_POSITION.strip()\n assert headerData.ANTENNA_DELTA_HEN.NORTH.strip()\n assert len(headerData.SYS_NUMBER_OBS_TYPES) > 0\n assert headerData.INTERVAL.strip()\n assert len(headerData.SYS_PHASE_SHIFTS) > 0\n assert (headerData.GLONASS_SLOT_FREQ_NUMBERS.\n NUMBER_OF_SATELLITES.strip())\n assert len(headerData.GLONASS_COD_PHS_BIS) > 0\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5024917125701904, "alphanum_fraction": 0.5938538312911987, "avg_line_length": 37.22222137451172, "blob_id": "ce7622b006f20bf53537e351c32114cf0a6cdf5b", "content_id": "a0ff7f42fd16037be3eb4bebd12441c8b9efdc82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2408, "license_type": "no_license", "max_line_length": 87, "num_lines": 63, "path": "/test/refEphermisClockDecoderTest.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "import unittest\nfrom nose.tools import *\nfrom kpi.refephermisclockdecoder.refEphermisClockDecoder import RefEphermisClockDecoder\n\n\nclass TestRefEphermisClockDecoder(unittest.TestCase):\n\n def setUp(self):\n \"\"\"\n setup call, runs before each test\n \"\"\"\n self.sp3List = [\"test/resources/sp3cparser/testtum18950.sp3\"]\n self.prnId = [\"E08\", \"E09\", \"E11\", \"E12\",\n \"E19\", \"E22\", \"E24\", 
\"E26\", \"E30\"]\n self.timeTagsCal = ['2016 5 1 0 0 0.00000000',\n '2016 5 1 0 5 0.00000000',\n '2016 5 1 0 10 0.00000000']\n self.classToTest = RefEphermisClockDecoder()\n\n def tearDown(self):\n \"\"\"\n tearDown call, runs after each test\n \"\"\"\n\n def testRefEphermisClockDecoderSuccess(self):\n result = self.classToTest.decodeRefEphermisAndClock(\n self.timeTagsCal, self.sp3List, self.prnId)\n assert len(result) == 30\n assert result[0].Vx_com is None\n assert result[1].Vx_com == \"-27816.774196\"\n assert result[1].X_com is None\n\n def testRefEphermisClockDecoderOnlyOnePrnId(self):\n result = self.classToTest.decodeRefEphermisAndClock(\n self.timeTagsCal, self.sp3List, [\"E08\"])\n assert len(result) == 6\n\n @raises(Exception)\n def testRefEphermisClockDecoderWrongTimeTag(self):\n timeTagsCalWrong = [\"2001 8 8 0 0 0.00000000\\n\",\n \"2001 8 8 0 0 0.00000000\\n\"]\n # lambda is a anonym function for python.\n # In this case it is used to avoid the programm to stop after exception\n # raises\n self.classToTest.decodeRefEphermisAndClock(\n timeTagsCalWrong, self.sp3List, self.prnId)\n\n def testTimeRefConsistencyCheckSuccess(self):\n timeTagsRef = ['2016 5 1 0 0 0.00000000',\n '2016 5 1 0 5 0.00000000',\n '2016 5 1 0 10 0.00000000']\n assert self.classToTest.timeRefConsistencyCheck(\n self.timeTagsCal, timeTagsRef)\n\n def testTimeRefConsistencyCheckFalse(self):\n timeTagsRef = [\"2001 8 8 0 0 0.00000000\\n\",\n \"2001 8 8 0 0 0.00000000\\n\"]\n assert self.classToTest.timeRefConsistencyCheck(\n self.timeTagsCal, timeTagsRef) is False\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5555555820465088, "alphanum_fraction": 0.5576519966125488, "avg_line_length": 24.105262756347656, "blob_id": "ba68f92271e6ceedc5e3437ffaa7b45b166128b5", "content_id": "0ff490d2be0f412f50011ff967e44bf15bc1ce39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 477, "license_type": "no_license", "max_line_length": 54, "num_lines": 19, "path": "/data/antennaNorthEastUp.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass AntennaNorthEastUp:\n \"\"\"\n A class variable for\n AntennaFrequency (NEU)\n ObservationsDataHeader ()\n Parameters:\n NORTH = offset position to north in meters\n EAST = offset position to east in meters\n UP = offset position to up in meters\n \"\"\"\n\n def __init__(self, north=\"\", east=\"\", up=\"\"):\n self.NORTH = north\n self.EAST = east\n self.UP = up\n" }, { "alpha_fraction": 0.5764411091804504, "alphanum_fraction": 0.5789473652839661, "avg_line_length": 23.9375, "blob_id": "d3f72149cd7d5b0c1a302629e2b34da50e2501ae", "content_id": "8c7505c749295583c62a68ff323cb98865b6f279", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 399, "license_type": "no_license", "max_line_length": 76, "num_lines": 16, "path": "/data/antennaFrequency.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass AntennaFrequency:\n \"\"\"\n A Class variable for AntennaPhaseOffset(FREQUENCY_LIST) and contains the\n name and the offset Data\n Paramters:\n NAME = Name of the frequency\n NEU = Offset Data in meters (NORTH, EAST , UP)\n \"\"\"\n\n def __init__(self, neu=\"\", name=\"\"):\n self.NAME = name\n self.NEU = neu\n" }, { "alpha_fraction": 0.572541356086731, "alphanum_fraction": 0.5754625201225281, "avg_line_length": 
41.79166793823242, "blob_id": "4209fac31577dff480a1e51c52eab0a262bf7a41", "content_id": "61f835e90c85eb0e013caf4d2f87684dc511129b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1027, "license_type": "no_license", "max_line_length": 75, "num_lines": 24, "path": "/data/sysObservationTypes.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass SysObservationTypes:\n \"\"\"\n Class variable for ObservationsDataHeader (SYS_NUMBER_OBS_TYPES)\n SYS / # / OBS TYPES\n Parameters:\n SATELLITE_SYSTEM_CODE - Satellite system code (G/R/E/J/C/I/S)\n NUMBER_OF_OBSERVATION_TYPES - Number of different observation types\n for the specified satellite system\n OBSERVATION_DESCRIPTORS - Observation descriptors:\n Type\n Band\n Attribute\n Use continuation line(s) for more than 13 observation descriptors.\n \"\"\"\n\n def __init__(self, satelliteSystemCode=\"\", numberOfObservationTypes=\"\",\n observationDescriptors=\"\"):\n self.SATELLITE_SYSTEM_CODE = satelliteSystemCode\n self.NUMBER_OF_OBSERVATION_TYPES = numberOfObservationTypes\n self.OBSERVATION_DESCRIPTORS = observationDescriptors\n" }, { "alpha_fraction": 0.45755237340927124, "alphanum_fraction": 0.4597574472427368, "avg_line_length": 36.79166793823242, "blob_id": "65e6fdbe8d81cd9db1ad5f1e8a93e8d49472ff84", "content_id": "06f2d6dcedb83c5dc6adc174a9a2802fe278b725", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 907, "license_type": "no_license", "max_line_length": 76, "num_lines": 24, "path": "/data/timeOfObservation.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass TimeOfObservation:\n \"\"\"\n Class variable for ObservationsDataHeader\n (TIME_OF_FIRST_OBS, TIME_OF_LAST_OBS)\n\n Parameters:\n TIME_OF_OBSERVATION - Time of last observation record (4-digit-year,\n month,day,hour,min,sec)\n TIME_SYSTEM - Time system:\n GPS (=GPS time system)\n GLO (=UTC time system)\n GAL (=Galileo time system)\n QZS (= QZSS time system)\n BDT (= BDS time system)\n IRN (= IRNSS time system)\n \"\"\"\n\n def _init_(self, timeOfObservation, timeSystem):\n self.TIME_OF_OBSERVATION = timeOfObservation\n self.TIME_SYSTEM = timeSystem\n" }, { "alpha_fraction": 0.3858520984649658, "alphanum_fraction": 0.47909969091415405, "avg_line_length": 14.550000190734863, "blob_id": "5c1fccfe8af4fee50b372835b2dfd738cc6c3111", "content_id": "c7186d8b00509d1c6adc6b13c8e1b71264298062", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 311, "license_type": "no_license", "max_line_length": 26, "num_lines": 20, "path": "/data/serviceEnum.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom enum import Enum\n\n\nclass Service(Enum):\n \"\"\"\n enum for the services.\n E1 = \"E1\"\n E5a = \"E5a\"\n E5b = \"E5b\"\n E1E5a = \"E1-E5a\"\n E1E5b = \"E1-E5b\"\n \"\"\"\n E1 = \"E1\"\n E5a = \"E5a\"\n E5b = \"E5b\"\n E1E5a = \"E1-E5a\"\n E1E5b = \"E1-E5b\"\n" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.8611111044883728, "avg_line_length": 6.400000095367432, "blob_id": "2a9d4f9ae6fdb81c3066b4a5753b6e7ddb0269db", "content_id": "3b46fb7dabab701a70b087795bfbe393d242b280", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 36, "license_type": "no_license", 
"max_line_length": 11, "num_lines": 5, "path": "/requirements.txt", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "novas\nnovas_de405\nscipy\nnumpy\nmpmath" }, { "alpha_fraction": 0.5676328539848328, "alphanum_fraction": 0.6388888955116272, "avg_line_length": 32.79591751098633, "blob_id": "24c961f254286f619b722baacc598ff2b5ff57ce", "content_id": "440405e99ffacf04d45019d57729ab8180fc7215", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1656, "license_type": "no_license", "max_line_length": 72, "num_lines": 49, "path": "/test/commonTest.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport unittest\nfrom nose.tools import *\nimport datetime\nimport sys\nfrom kpi.helper.common import Common\n\n\nclass TestCommon(unittest.TestCase):\n\n def setUp(self):\n \"\"\"\n setup call, runs before each test\n \"\"\"\n self.classToTest = Common\n self.fileDate = \"2016 03 01 00 00 00\"\n self.fileNextDate = \"2016 03 02 00 00 00\"\n self.assertDate = datetime.datetime(2016, 3, 1, 0, 0, 0)\n self.assertNextDate = datetime.datetime(2016, 3, 2, 0, 0, 0)\n\n def testGetDatetimeFromGregorianDate(self):\n assert self.classToTest.getDatetimeFromGregorianDate(\n self.fileDate) == self.assertDate\n assert self.classToTest.getDatetimeFromGregorianDate(\n self.fileNextDate) == self.assertNextDate\n\n def testGetDayOfWeek(self):\n assert self.classToTest.getDayOfWeek(\n datetime.datetime(2016, 8, 22, 0, 0, 0)) == 1\n assert self.classToTest.getDayOfWeek(\n datetime.datetime(2016, 8, 28, 0, 0, 0)) == 0\n\n def testGetGpsDate(self):\n gpdsDate = self.classToTest.convertUtfToGpsFormat(\n datetime.datetime(2016, 7, 2, 23, 55, 0))\n assert gpdsDate.GPS_WEEK == 1903\n assert gpdsDate.SECONDS_OF_WEEK == 604500.0\n gpdsDate = self.classToTest.convertUtfToGpsFormat(\n datetime.datetime(2016, 7, 3, 1, 30, 0))\n assert gpdsDate.GPS_WEEK == 1904\n assert gpdsDate.SECONDS_OF_WEEK == 5400.0\n\n def testConvertToBinary(self):\n assert self.classToTest.convertDecimalToBinary(5, 6) == \"000101\"\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.565990149974823, "alphanum_fraction": 0.5707802772521973, "avg_line_length": 42.349727630615234, "blob_id": "6afda28bb7150eba1fc5bb14bf52d52bcf439f84", "content_id": "a86cd5f04c60ecf4f40d758d6d5ff64f6818547f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7933, "license_type": "no_license", "max_line_length": 79, "num_lines": 183, "path": "/configParser/configFileParser.py", "repo_name": "tsidv/kpi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport configparser\nimport datetime\nfrom kpi.data.configFileModel import ConfigFileModel\nfrom kpi.helper.common import Common\n\n\nclass ConfigFileParser:\n \"\"\"\n reads the configuration file in text format\n and converts to internal parameters\n \"\"\"\n log = None\n # to be usable as singelton with standard configuration file\n basePath = \"resources\"\n inputPath = basePath + \"/References/input_files/\"\n configFile = basePath + \"/Configuration.cfg\"\n model = None\n\n # TODO: define this class as singelton\n def kpiCfg(self):\n \"\"\"\n default access loading configuration from 'resources/Configuration.cfg'\n \"\"\"\n if self.model is None:\n self.model = self.variablesReader(self.configFile)\n return self.model\n\n def configFileParser(self, configFile):\n \"\"\"\n incule the whole 
configFileParser process.\n configFile File: configuration File to parse\n return ...\n \"\"\"\n configFileModel = self.variablesReader(configFile)\n Common.setUpLoggingFile(configFileModel.LOG_FILE)\n self.log = Common.getLogger('ConfigFileParser')\n timeTagCal, timeTagGps, bceList, sp3List, dcbList, observableList\\\n = self.timeTagFileNamesGenerator(configFileModel.START_EPOCH,\n configFileModel.END_EPOCH,\n configFileModel.STEP,\n configFileModel.GAL_BRC,\n configFileModel.GAL_REF,\n configFileModel.DCB_REF,\n configFileModel.STATION_LIST,\n configFileModel.INP_FLD)\n LS = self.leapSecondsGenerator(\n timeTagCal, configFileModel.LEAP_SEC)\n return timeTagCal, timeTagGps, bceList, sp3List, dcbList, \\\n observableList, LS\n\n def variablesReader(self, configFile):\n \"\"\"\n parses the config file and returns the cofiguration file model\n configFile File: configuration file to parse\n \"\"\"\n config = configparser.ConfigParser()\n config.read(configFile)\n if config is None:\n Exception(\"Configfile not found.\")\n leapSeconds = []\n leapSecondsCombined = config[\"leap seconds\"][\"LEAP_SECONDS\"].split(\",\")\n for combinedLeapSecond in leapSecondsCombined:\n combinedTupel = combinedLeapSecond.split(\" \")\n leapSeconds.append((combinedTupel[0], combinedTupel[1]))\n stationList = []\n for station in config[\"stations\"][\"STATION_LIST\"].split(\",\"):\n stationList.append(station.replace(\" \", \"\"))\n model = ConfigFileModel(config[\"logfile\"][\"LOG_FILE\"],\n config[\"time\"][\n \"START_EPOCH\"].replace(\",\", \" \"),\n config[\"time\"][\n \"END_EPOCH\"].replace(\",\", \" \"),\n config[\"time\"][\"STEP\"],\n leapSeconds,\n config[\"gnss\"][\"AOD_MAX\"],\n config[\"source\"][\"GAL_BRC\"],\n config[\"source\"][\"GAL_REF\"],\n config[\"source\"][\"APC_OFF_REF\"],\n config[\"source\"][\"APC_OFF_BRC\"],\n config[\"source\"][\"DCB_REF\"],\n config[\"source\"][\"INP_FLD\"],\n config[\"source\"][\"OUT_FLD\"],\n config[\"selection\"][\"PRN_ID\"],\n config[\"selection\"][\"SERVICE\"],\n stationList\n )\n return model\n\n def timeTagFileNamesGenerator(self, startEpoch, endEpoch, step,\n galBrc, galRef, dcbRef, stationList,\n inpField):\n self.log.info(\"Start Time Tag and File Names Generator...\")\n timeTagCal = [startEpoch]\n endEpochDate = Common.getDatetimeFromGregorianDate(endEpoch)\n tempDate = Common.getDatetimeFromGregorianDate(startEpoch)\n timeTagGps = [Common.convertUtfToGpsFormat(tempDate)]\n bceList = []\n sp3List = []\n dcbList = []\n observableList = []\n # namings for start day\n self.log.info(\"Generate names for start day.\")\n brcName, sp3cName, dcbName, stationNames = self.generateNamesForOneDay(\n tempDate, galBrc, galRef, dcbRef, stationList, inpField)\n bceList.append(brcName)\n sp3List.append(sp3cName)\n dcbList.append(dcbName)\n observableList.append(stationNames)\n # calculate steps. staps + 2 for first and last step\n timeDifference = endEpochDate - tempDate\n calculatedSteps = timeDifference.total_seconds() / int(step) + 1\n k = 1\n while (tempDate < endEpochDate):\n timeTagCal.append(tempDate.strftime(\"%Y %m %d %H %M %S\"))\n timeTagGps.append(Common.convertUtfToGpsFormat(tempDate))\n oldTempDate = tempDate\n tempDate += datetime.timedelta(seconds=int(step))\n if (oldTempDate.day < tempDate.day) & (tempDate < endEpochDate):\n print(tempDate, endEpochDate)\n self.log.info(\"next Day in Epoch. 
New names will generated.\")\n brcName, sp3cName, dcbName, stationNames = \\\n self.generateNamesForOneDay(tempDate, galBrc, galRef,\n dcbRef, stationList, inpField)\n bceList.append(brcName)\n sp3List.append(sp3cName)\n dcbList.append(dcbName)\n observableList.append(stationNames)\n k += 1\n if calculatedSteps != k:\n self.log.error(\n \"The Steps are not coherent! Refresh configuration file.\")\n raise Exception(\n \"The Steps are not coherent! Refresh configuration file.\")\n self.log.info(\"End Time Tag and File Names Generator.\")\n return timeTagCal, timeTagGps, bceList, sp3List, dcbList,\\\n observableList\n\n def generateNamesForOneDay(self, day, galBrc, galRef, dcbRef,\n stationList, inpField):\n \"\"\"\n generate the naming for boardcast ephemeris, precise orbits\n dcb and station observables.\n paramters:\n day datetime: day for names\n stationList [string]: name of stations\n inpField string: input resource directory\n returns brcName (string), sp3cName (string),\n dcbName (string), stationNames ([string])\n \"\"\"\n gpsTimeTag = Common.convertUtfToGpsFormat(day)\n doy = day.strftime(\"%j\")\n dow = Common.getDayOfWeek(day)\n brcName = inpField + \"/\" + galBrc + doy + \\\n \"0.\" + day.strftime(\"%Y\")[2:4] + \"p\"\n sp3cName = inpField + \"/\" + galRef + \\\n str(gpsTimeTag.GPS_WEEK) + str(dow) + \".sp3\"\n dcbName = inpField + \"/\" + dcbRef + \"_\" + \\\n day.strftime(\"%Y\") + doy + \"0000_01D_01D_DCB.BSX\"\n stationNames = []\n for station in stationList:\n stationNames.append(inpField + \"/\" + station + doy + \"0.\" +\n day.strftime(\"%Y\")[2:4] + \"o\")\n return brcName, sp3cName, dcbName, stationNames\n\n def leapSecondsGenerator(self, timeTagsCal, leapSeconds):\n \"\"\"\n measures the leap seconds coming from configfile for the timetags.\n timeTagsCal [String]: time tags coming from timeTag generator\n leapSeconds [(String, String)]: specified in the configfile\n return list of leapseconds [String]\n \"\"\"\n leaps = []\n for timeTag in timeTagsCal:\n ls = \"0\"\n ggTimeTag = Common.getDatetimeFromGregorianDate(timeTag)\n for lsTupel in leapSeconds:\n lsDate = Common.getDatetimeFromGregorianDate(lsTupel[0])\n if lsDate <= ggTimeTag:\n ls = lsTupel[1]\n leaps.append(ls)\n return leaps\n" } ]
62
hrnciar/poezio
https://github.com/hrnciar/poezio
ff30dc3f0ac37e5a9814c41e4100ae2f099a585e
12b8af11df35dda535412b0c02ba792890095a7d
014df5b4fa3fef822d12eb5f3505ef910e107588
refs/heads/main
2023-04-09T08:10:11.042967
2021-03-20T17:22:52
2021-03-20T17:22:52
353,601,174
0
0
Zlib
2021-04-01T06:41:06
2021-03-20T19:07:48
2021-03-20T19:07:46
null
[ { "alpha_fraction": 0.5726630091667175, "alphanum_fraction": 0.5726630091667175, "avg_line_length": 23.960784912109375, "blob_id": "8e4f43d998d405f5b9c150b0f7b613ef2a886898", "content_id": "e1ebe5e056a585344fff7992dae1cbba59732df5", "detected_licenses": [ "CC-BY-2.0", "Zlib" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1273, "license_type": "permissive", "max_line_length": 71, "num_lines": 51, "path": "/poezio/args.py", "repo_name": "hrnciar/poezio", "src_encoding": "UTF-8", "text": "\"\"\"\nModule related to the argument parsing\n\nThere is a fallback to the deprecated optparse if argparse is not found\n\"\"\"\nfrom pathlib import Path\nfrom argparse import ArgumentParser, SUPPRESS\n\nfrom poezio.version import __version__\n\n\ndef parse_args(CONFIG_PATH: Path):\n \"\"\"\n Parse the arguments from the command line\n \"\"\"\n parser = ArgumentParser('poezio')\n parser.add_argument(\n \"-c\",\n \"--check-config\",\n dest=\"check_config\",\n action='store_true',\n help='Check the config file')\n parser.add_argument(\n \"-d\",\n \"--debug\",\n dest=\"debug\",\n help=\"The file where debug will be written\",\n metavar=\"DEBUG_FILE\")\n parser.add_argument(\n \"-f\",\n \"--file\",\n dest=\"filename\",\n default=CONFIG_PATH / 'poezio.cfg',\n type=Path,\n help=\"The config file you want to use\",\n metavar=\"CONFIG_FILE\")\n parser.add_argument(\n '-v',\n '--version',\n action='version',\n version='Poezio v%s' % __version__,\n )\n parser.add_argument(\n \"--custom-version\",\n dest=\"custom_version\",\n help=SUPPRESS,\n metavar=\"VERSION\",\n default=__version__\n )\n options = parser.parse_args()\n return options\n" }, { "alpha_fraction": 0.5670665502548218, "alphanum_fraction": 0.5681570172309875, "avg_line_length": 23.7702693939209, "blob_id": "43e5b049e467c45facb74e08681365fdbf45e0ad", "content_id": "af1b9d4ae8e99680a5f154ccff14a0450a708e31", "detected_licenses": [ "CC-BY-2.0", "Zlib" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1834, "license_type": "permissive", "max_line_length": 60, "num_lines": 74, "path": "/test/test_windows.py", "repo_name": "hrnciar/poezio", "src_encoding": "UTF-8", "text": "import pytest\n\nclass ConfigShim(object):\n def get(self, *args, **kwargs):\n return ''\n\nfrom poezio import config\nconfig.config = ConfigShim()\n\nfrom poezio.windows import Input, HistoryInput, MessageInput\n\nclass SubInput(Input):\n def rewrite_text(self, *args, **kwargs):\n return None\n\[email protected]\ndef input():\n return SubInput()\n\nclass TestInput(object):\n\n def test_do_command(self, input):\n\n input.do_command('a')\n assert input.text == 'a'\n\n for char in 'coucou':\n input.do_command(char)\n assert input.text == 'acoucou'\n\n def test_empty(self, input):\n assert input.is_empty() == True\n input.do_command('a')\n assert input.is_empty() == False\n\n def test_key_left(self, input):\n for char in 'this is a line':\n input.do_command(char)\n for i in range(4):\n input.key_left()\n for char in 'long ':\n input.do_command(char)\n\n assert input.text == 'this is a long line'\n\n def test_key_right(self, input):\n for char in 'this is a line':\n input.do_command(char)\n for i in range(4):\n input.key_left()\n input.key_right()\n\n for char in 'iii':\n input.do_command(char)\n\n assert input.text == 'this is a liiiine'\n\n def test_key_home(self, input):\n for char in 'this is a line of text':\n input.do_command(char)\n input.do_command('z')\n input.key_home()\n input.do_command('a')\n\n assert 
input.text == 'athis is a line of textz'\n\n def test_key_end(self, input):\n for char in 'this is a line of text':\n input.do_command(char)\n input.key_home()\n input.key_end()\n input.do_command('z')\n\n assert input.text == 'this is a line of textz'\n\n" }, { "alpha_fraction": 0.5926609635353088, "alphanum_fraction": 0.5971121191978455, "avg_line_length": 30.871973037719727, "blob_id": "d4bf5d42a1f56f79845398782566158d2f8cac3a", "content_id": "468d12e79bc005136930664a625906d6eaef667b", "detected_licenses": [ "CC-BY-2.0", "Zlib" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9211, "license_type": "permissive", "max_line_length": 95, "num_lines": 289, "path": "/poezio/mam.py", "repo_name": "hrnciar/poezio", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n Query and control an archive of messages stored on a server using\n XEP-0313: Message Archive Management(MAM).\n\"\"\"\n\nimport asyncio\nimport logging\nimport random\nfrom datetime import datetime, timedelta, timezone\nfrom hashlib import md5\nfrom typing import (\n Any,\n AsyncIterable,\n Callable,\n Dict,\n List,\n Optional,\n)\n\nfrom slixmpp import JID, Message as SMessage\nfrom slixmpp.exceptions import IqError, IqTimeout\nfrom poezio.theming import get_theme\nfrom poezio import tabs\nfrom poezio import xhtml, colors\nfrom poezio.config import config\nfrom poezio.common import to_utc\nfrom poezio.text_buffer import TextBuffer, HistoryGap\nfrom poezio.ui.types import (\n BaseMessage,\n EndOfArchive,\n Message,\n)\n\n\nlog = logging.getLogger(__name__)\n\nclass DiscoInfoException(Exception): pass\nclass MAMQueryException(Exception): pass\nclass NoMAMSupportException(Exception): pass\n\n\ndef make_line(\n tab: tabs.ChatTab,\n text: str,\n time: datetime,\n jid: JID,\n identifier: str = '',\n ) -> Message:\n \"\"\"Adds a textual entry in the TextBuffer\"\"\"\n\n # Convert to local timezone\n time = time.replace(tzinfo=timezone.utc).astimezone(tz=None)\n time = time.replace(tzinfo=None)\n\n if isinstance(tab, tabs.MucTab):\n nick = jid.resource\n user = tab.get_user_by_name(nick)\n if user:\n color = user.color\n else:\n theme = get_theme()\n if theme.ccg_palette:\n fg_color = colors.ccg_text_to_color(theme.ccg_palette, nick)\n color = fg_color, -1\n else:\n mod = len(theme.LIST_COLOR_NICKNAMES)\n nick_pos = int(md5(nick.encode('utf-8')).hexdigest(), 16) % mod\n color = theme.LIST_COLOR_NICKNAMES[nick_pos]\n else:\n if jid.bare == tab.core.xmpp.boundjid.bare:\n nick = tab.core.own_nick\n color = get_theme().COLOR_OWN_NICK\n else:\n color = get_theme().COLOR_REMOTE_USER\n nick = tab.get_nick()\n return Message(\n txt=text,\n identifier=identifier,\n time=time,\n nickname=nick,\n nick_color=color,\n history=True,\n user=None,\n )\n\nasync def get_mam_iterator(\n core,\n groupchat: bool,\n remote_jid: JID,\n amount: int,\n reverse: bool = True,\n start: Optional[str] = None,\n end: Optional[str] = None,\n before: Optional[str] = None,\n ) -> AsyncIterable[Message]:\n \"\"\"Get an async iterator for this mam query\"\"\"\n try:\n query_jid = remote_jid if groupchat else JID(core.xmpp.boundjid.bare)\n iq = await core.xmpp.plugin['xep_0030'].get_info(jid=query_jid)\n except (IqError, IqTimeout):\n raise DiscoInfoException()\n if 'urn:xmpp:mam:2' not in iq['disco_info'].get_features():\n raise NoMAMSupportException()\n\n args: Dict[str, Any] = {\n 'iterator': True,\n 'reverse': reverse,\n }\n\n if groupchat:\n args['jid'] = remote_jid\n else:\n args['with_jid'] = 
remote_jid\n\n if amount > 0:\n args['rsm'] = {'max': amount}\n args['start'] = start\n args['end'] = end\n return core.xmpp['xep_0313'].retrieve(**args)\n\n\ndef _parse_message(msg: SMessage) -> Dict:\n \"\"\"Parse info inside a MAM forwarded message\"\"\"\n forwarded = msg['mam_result']['forwarded']\n message = forwarded['stanza']\n return {\n 'time': forwarded['delay']['stamp'],\n 'jid': message['from'],\n 'text': message['body'],\n 'identifier': message['origin-id']\n }\n\n\nasync def retrieve_messages(tab: tabs.ChatTab,\n results: AsyncIterable[SMessage],\n amount: int = 100) -> List[BaseMessage]:\n \"\"\"Run the MAM query and put messages in order\"\"\"\n msg_count = 0\n msgs = []\n to_add = []\n try:\n async for rsm in results:\n for msg in rsm['mam']['results']:\n if msg['mam_result']['forwarded']['stanza'] \\\n .xml.find('{%s}%s' % ('jabber:client', 'body')) is not None:\n args = _parse_message(msg)\n msgs.append(make_line(tab, **args))\n for msg in reversed(msgs):\n to_add.append(msg)\n msg_count += 1\n if msg_count == amount:\n to_add.reverse()\n return to_add\n msgs = []\n to_add.reverse()\n return to_add\n except (IqError, IqTimeout) as exc:\n log.debug('Unable to complete MAM query: %s', exc, exc_info=True)\n raise MAMQueryException('Query interrupted')\n\n\nasync def fetch_history(tab: tabs.ChatTab,\n start: Optional[datetime] = None,\n end: Optional[datetime] = None,\n amount: int = 100) -> List[BaseMessage]:\n remote_jid = tab.jid\n if not end:\n for msg in tab._text_buffer.messages:\n if isinstance(msg, Message):\n end = msg.time\n end -= timedelta(microseconds=1)\n break\n if end is None:\n end = datetime.now()\n end = to_utc(end)\n end_str = datetime.strftime(end, '%Y-%m-%dT%H:%M:%SZ')\n\n start_str = None\n if start is not None:\n start = to_utc(start)\n start_str = datetime.strftime(start, '%Y-%m-%dT%H:%M:%SZ')\n\n mam_iterator = await get_mam_iterator(\n core=tab.core,\n groupchat=isinstance(tab, tabs.MucTab),\n remote_jid=remote_jid,\n amount=amount,\n end=end_str,\n start=start_str,\n reverse=True,\n )\n return await retrieve_messages(tab, mam_iterator, amount)\n\nasync def fill_missing_history(tab: tabs.ChatTab, gap: HistoryGap) -> None:\n start = gap.last_timestamp_before_leave\n end = gap.first_timestamp_after_join\n if start:\n start = start + timedelta(seconds=1)\n if end:\n end = end - timedelta(seconds=1)\n try:\n messages = await fetch_history(tab, start=start, end=end, amount=999)\n tab._text_buffer.add_history_messages(messages, gap=gap)\n if messages:\n tab.core.refresh_window()\n except (NoMAMSupportException, MAMQueryException, DiscoInfoException):\n return\n finally:\n tab.query_status = False\n\nasync def on_new_tab_open(tab: tabs.ChatTab) -> None:\n \"\"\"Called when opening a new tab\"\"\"\n amount = 2 * tab.text_win.height\n end = datetime.now()\n for message in tab._text_buffer.messages:\n if isinstance(message, Message) and to_utc(message.time) < to_utc(end):\n end = message.time\n break\n end = end - timedelta(microseconds=1)\n try:\n messages = await fetch_history(tab, end=end, amount=amount)\n tab._text_buffer.add_history_messages(messages)\n if messages:\n tab.core.refresh_window()\n except (NoMAMSupportException, MAMQueryException, DiscoInfoException):\n return None\n finally:\n tab.query_status = False\n\n\ndef schedule_tab_open(tab: tabs.ChatTab) -> None:\n \"\"\"Set the query status and schedule a MAM query\"\"\"\n tab.query_status = True\n asyncio.ensure_future(on_tab_open(tab))\n\n\nasync def on_tab_open(tab: tabs.ChatTab) -> None:\n 
gap = tab._text_buffer.find_last_gap_muc()\n if gap is None or not gap.leave_message:\n await on_new_tab_open(tab)\n else:\n await fill_missing_history(tab, gap)\n\n\ndef schedule_scroll_up(tab: tabs.ChatTab) -> None:\n \"\"\"Set query status and schedule a scroll up\"\"\"\n tab.query_status = True\n asyncio.ensure_future(on_scroll_up(tab))\n\n\nasync def on_scroll_up(tab: tabs.ChatTab) -> None:\n tw = tab.text_win\n\n # If position in the tab is < two screen pages, then fetch MAM, so that we\n # keep some prefetched margin. A first page should also be prefetched on\n # join if not already available.\n total, pos, height = len(tw.built_lines), tw.pos, tw.height\n rest = (total - pos) // height\n\n if rest > 1:\n tab.query_status = False\n return None\n\n try:\n # XXX: Do we want to fetch a possibly variable number of messages?\n # (InfoTab changes height depending on the type of messages, see\n # `information_buffer_popup_on`).\n messages = await fetch_history(tab, amount=height)\n last_message_exists = False\n if tab._text_buffer.messages:\n last_message = tab._text_buffer.messages[0]\n last_message_exists = True\n if not messages and last_message_exists and not isinstance(last_message, EndOfArchive):\n time = tab._text_buffer.messages[0].time\n messages = [EndOfArchive('End of archive reached', time=time)]\n tab._text_buffer.add_history_messages(messages)\n if messages:\n tab.core.refresh_window()\n except NoMAMSupportException:\n tab.core.information('MAM not supported for %r' % tab.jid, 'Info')\n return None\n except (MAMQueryException, DiscoInfoException):\n tab.core.information('An error occured when fetching MAM for %r' % tab.jid, 'Error')\n return None\n finally:\n tab.query_status = False\n" }, { "alpha_fraction": 0.5499367713928223, "alphanum_fraction": 0.6131479144096375, "avg_line_length": 46.93939208984375, "blob_id": "84486fd8bfecc945c9b28014ef51b1a2fcb6bf70", "content_id": "09ba720e0b99653ee15ec8740c4d34e7f157e6d2", "detected_licenses": [ "CC-BY-2.0", "Zlib" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1586, "license_type": "permissive", "max_line_length": 131, "num_lines": 33, "path": "/test/test_logger.py", "repo_name": "hrnciar/poezio", "src_encoding": "UTF-8", "text": "\"\"\"\nTest the functions in the `logger` module\n\"\"\"\nimport datetime\nfrom poezio.logger import LogMessage, parse_log_line, parse_log_lines, build_log_message\nfrom poezio.common import get_utc_time, get_local_time\n\ndef test_parse_message():\n line = 'MR 20170909T09:09:09Z 000 <nick>  body'\n assert vars(parse_log_line(line, 'user@domain')) == vars(LogMessage('2017', '09', '09', '09', '09', '09', '0', 'nick', 'body'))\n\n line = '<>'\n assert parse_log_line(line, 'user@domain') is None\n\n line = 'MR 20170908T07:05:04Z 003 <nick>  '\n assert vars(parse_log_line(line, 'user@domain')) == vars(LogMessage('2017', '09', '08', '07', '05', '04', '003', 'nick', ''))\n\n\ndef test_log_and_parse_messages():\n msg1 = {'nick': 'toto', 'msg': 'coucou', 'date': datetime.datetime.now().replace(microsecond=0)}\n msg1_utc = get_utc_time(msg1['date'])\n built_msg1 = build_log_message(**msg1)\n assert built_msg1 == 'MR %s 000 <toto>  coucou\\n' % (msg1_utc.strftime('%Y%m%dT%H:%M:%SZ'))\n\n msg2 = {'nick': 'toto', 'msg': 'coucou\\ncoucou', 'date': datetime.datetime.now().replace(microsecond=0)}\n built_msg2 = build_log_message(**msg2)\n msg2_utc = get_utc_time(msg2['date'])\n assert built_msg2 == 'MR %s 001 <toto>  coucou\\n coucou\\n' % 
(msg2_utc.strftime('%Y%m%dT%H:%M:%SZ'))\n\n assert parse_log_lines((built_msg1 + built_msg2).split('\\n'), 'user@domain') == [\n {'time': msg1['date'], 'history': True, 'txt': '\\x195,-1}coucou', 'nickname': 'toto'},\n {'time': msg2['date'], 'history': True, 'txt': '\\x195,-1}coucou\\ncoucou', 'nickname': 'toto'},\n ]\n" }, { "alpha_fraction": 0.525138795375824, "alphanum_fraction": 0.5300555229187012, "avg_line_length": 30.520000457763672, "blob_id": "175c07d4a0f8867646a3b80a89851d844881e5e8", "content_id": "8c04952598ace5401337025c0415d6458701b137", "detected_licenses": [ "CC-BY-2.0", "Zlib" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6305, "license_type": "permissive", "max_line_length": 88, "num_lines": 200, "path": "/poezio/ui/types.py", "repo_name": "hrnciar/poezio", "src_encoding": "UTF-8", "text": "\nfrom datetime import datetime\nfrom math import ceil, log10\nfrom typing import Union, Optional, List, Tuple\nfrom poezio.ui.funcs import truncate_nick\nfrom poezio import poopt\nfrom poezio.user import User\nfrom poezio.theming import dump_tuple, get_theme\n\n\nclass BaseMessage:\n __slots__ = ('txt', 'time', 'identifier')\n\n def __init__(self, txt: str, identifier: str = '', time: Optional[datetime] = None):\n self.txt = txt\n self.identifier = identifier\n if time is not None:\n self.time = time\n else:\n self.time = datetime.now()\n\n def compute_offset(self, with_timestamps: bool, nick_size: int) -> int:\n theme = get_theme()\n return theme.SHORT_TIME_FORMAT_LENGTH + 1\n\n\nclass EndOfArchive(BaseMessage):\n \"\"\"Marker added to a buffer when we reach the end of a MAM archive\"\"\"\n\n\nclass InfoMessage(BaseMessage):\n def __init__(self, txt: str, identifier: str = '', time: Optional[datetime] = None):\n txt = ('\\x19%s}' % dump_tuple(get_theme().COLOR_INFORMATION_TEXT)) + txt\n super().__init__(txt=txt, identifier=identifier, time=time)\n\n\nclass MucOwnLeaveMessage(InfoMessage):\n \"\"\"Status message displayed on our room leave/kick/ban\"\"\"\n\n\nclass MucOwnJoinMessage(InfoMessage):\n \"\"\"Status message displayed on our room join\"\"\"\n\n\nclass XMLLog(BaseMessage):\n \"\"\"XML Log message\"\"\"\n __slots__ = ('incoming')\n\n def __init__(\n self,\n txt: str,\n incoming: bool,\n ):\n BaseMessage.__init__(\n self,\n txt=txt,\n identifier='',\n )\n self.txt = txt\n self.identifier = ''\n self.incoming = incoming\n\n def compute_offset(self, with_timestamps: bool, nick_size: int) -> int:\n offset = 0\n theme = get_theme()\n if with_timestamps:\n offset += 1 + theme.SHORT_TIME_FORMAT_LENGTH\n if self.incoming:\n nick = theme.CHAR_XML_IN\n else:\n nick = theme.CHAR_XML_OUT\n nick = truncate_nick(nick, nick_size) or ''\n offset += 1 + len(nick)\n return offset\n\n\nclass StatusMessage(BaseMessage):\n __slots__ = ('format_string', 'format_args')\n\n def __init__(self, format_string: str, format_args: dict):\n BaseMessage.__init__(\n self,\n txt='',\n )\n self.format_string = format_string\n self.format_args = format_args\n self.rebuild()\n\n def rebuild(self):\n real_args = {}\n for key, func in self.format_args.items():\n real_args[key] = func()\n self.txt = self.format_string.format(**real_args)\n\n\nclass Message(BaseMessage):\n __slots__ = ('nick_color', 'nickname', 'user', 'delayed', 'history',\n 'top', 'highlight', 'me', 'old_message', 'revisions',\n 'jid', 'ack')\n\n def __init__(self,\n txt: str,\n nickname: Optional[str],\n time: Optional[datetime] = None,\n nick_color: Optional[Tuple] = None,\n delayed: bool = False,\n history: 
bool = False,\n user: Optional[User] = None,\n identifier: Optional[str] = '',\n top: Optional[bool] = False,\n highlight: bool = False,\n old_message: Optional['Message'] = None,\n revisions: int = 0,\n jid: Optional[str] = None,\n ack: int = 0) -> None:\n \"\"\"\n Create a new Message object with parameters, check for /me messages,\n and delayed messages\n \"\"\"\n BaseMessage.__init__(\n self,\n txt=txt.replace('\\t', ' ') + '\\x19o',\n identifier=identifier or '',\n time=time,\n )\n if txt.startswith('/me '):\n me = True\n txt = '\\x19%s}%s\\x19o' % (dump_tuple(get_theme().COLOR_ME_MESSAGE),\n txt[4:])\n else:\n me = False\n self.txt = txt\n self.delayed = delayed or history\n self.history = history\n self.nickname = nickname\n self.nick_color = nick_color\n self.user = user\n self.top = top\n self.highlight = highlight\n self.me = me\n self.old_message = old_message\n self.revisions = revisions\n self.jid = jid\n self.ack = ack\n\n def _other_elems(self) -> str:\n \"Helper for the repr_message function\"\n acc = []\n fields = list(self.__slots__)\n fields.remove('old_message')\n for field in fields:\n acc.append('%s=%s' % (field, repr(getattr(self, field))))\n return 'Message(%s, %s' % (', '.join(acc), 'old_message=')\n\n def __repr__(self) -> str:\n \"\"\"\n repr() for the Message class, for debug purposes, since the default\n repr() is recursive, so it can stack overflow given too many revisions\n of a message\n \"\"\"\n init = self._other_elems()\n acc = [init]\n next_message = self.old_message\n rev = 1\n while next_message is not None:\n acc.append(next_message._other_elems())\n next_message = next_message.old_message\n rev += 1\n acc.append('None')\n while rev:\n acc.append(')')\n rev -= 1\n return ''.join(acc)\n\n def compute_offset(self, with_timestamps: bool, nick_size: int) -> int:\n offset = 0\n theme = get_theme()\n if with_timestamps:\n if self.history:\n offset += 1 + theme.LONG_TIME_FORMAT_LENGTH\n else:\n offset += 1 + theme.SHORT_TIME_FORMAT_LENGTH\n\n if not self.nickname: # not a message, nothing to do afterwards\n return offset\n\n nick = truncate_nick(self.nickname, nick_size) or ''\n offset += poopt.wcswidth(nick)\n if self.ack:\n theme = get_theme()\n if self.ack > 0:\n offset += poopt.wcswidth(theme.CHAR_ACK_RECEIVED) + 1\n else:\n offset += poopt.wcswidth(theme.CHAR_NACK) + 1\n if self.me:\n offset += 3\n else:\n offset += 2\n if self.revisions:\n offset += ceil(log10(self.revisions + 1))\n return offset\n" }, { "alpha_fraction": 0.7200000286102295, "alphanum_fraction": 0.7232298254966736, "avg_line_length": 30.94444465637207, "blob_id": "025ea604a3a5c8309a709e469edb07ddb6f687e3", "content_id": "34c01b1fcf4a87eb700409c81d1bf935499a6e01", "detected_licenses": [ "CC-BY-2.0", "Zlib" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4042, "license_type": "permissive", "max_line_length": 82, "num_lines": 126, "path": "/README.rst", "repo_name": "hrnciar/poezio", "src_encoding": "UTF-8", "text": "poezio\n======\n\nHomepage: https://poez.io\n\nForge Page: https://lab.louiz.org/poezio/poezio\n\nPoezio is a console Jabber/XMPP client. Its goal is to use anonymous\nconnections to simply let the user join MultiUserChats. This way, the user\ndoesn't have to create a Jabber account, exactly like people are using\nIRC. 
Poezio's commands are designed to be (if possible) like commonly\nused IRC clients (weechat, irssi, etc).\n\nSince version 0.7, poezio can handle real Jabber accounts along with\nroster and one-to-one conversations, making it a full-featured console\nJabber client, but still MultiUserChats-centered.\nIn the future, poezio should implement at a 100% level all XEP related to\nMUCs, especially XEP 0045.\n\nInstall\n=======\n\nYou need python 3.5 or higher (preferably the latest) and the associated devel\npackage, to build C modules, and the slixmpp python library.\nYou also need aiodns if you want SRV record support.\n\nAdditionally, you’ll need sphinx to build the documentation pages.\nTo read the documentation without these dependancies just read the rst\nfiles in the doc/source/ directory or the generated documentation on the\nwebsite.\n\nThe simplest way to have up-to-date dependencies and to be able to test\nthis developement version is to use the ``update.sh`` script that downloads\nthem, places them in the right directory, and builds the C module.\n\nYou can then launch poezio with\n\n::\n\n $ ./launch.sh\n\nyou can now simply launch ``poezio``\n\nYou can edit the configuration file which is located in\n``~/.config/poezio/poezio.cfg`` by default, and you will have to copy\nand edit ``data/default_config.cfg`` if you want to edit the config before\nthe first launch. The default config file is fully commented, but you can\nalso read the “Configuration” documentation page which has links between\noptions and longer descriptions.\n\nPlease see the online documentation for more information on installing,\nconfiguring or using poezio: https://doc.poez.io/\n\nIf you still have questions, or if you're lost, don't hesitate to come\ntalk to us directly on our Jabber chat room (see Contact section).\n\nPlease DO report any bug you encounter and ask for any feature you want\n(we may implement it or not, but it’s always better to ask).\n\nAuthors\n=======\n\n- Florent Le Coz (louiz’) <[email protected]> (developer)\n- Mathieu Pasquet (mathieui) <[email protected]> (developer)\n- Emmanuel Gil Peyrot (Link Mauve) <[email protected]> (developer)\n\nContact/support\n===============\n\nJabber ChatRoom: `[email protected] <xmpp:[email protected]?join>`_\n\nReport a bug: https://lab.louiz.org/poezio/poezio/issues/new\n\nLicense\n=======\n\nPoezio is Free Software.\n(learn more: http://www.gnu.org/philosophy/free-sw.html)\n\nPoezio is released under the zlib License.\nPlease read the COPYING file for details.\n\nThe artwork logo was made by Gaëtan Ribémont and released under\nthe Creative Commons BY license (http://creativecommons.org/licenses/by/2.0/)\n\n\nHacking\n=======\n\nIf you want to contribute, you will be welcome on\n`[email protected] <xmpp:[email protected]?join>`_ to announce your\nideas, what you are going to do, or to seek help if you have trouble\nunderstanding some of the code.\n\nThe preferred way to submit changes is through a merge request on gitlab,\nat https://lab.louiz.org/poezio/poezio, but we also accept contributions\non github, or with a simple “please fetch my code on my personal git\nrepository hosted somewhere”\n\n\nThanks\n======\n\n- People:\n - Todd Eisenberger - Plugin system and OTR support\n - Jérôme Parment (Manfraid) - Code, testing\n - Akim Sadaoui - Code\n - Florian Duraffourg - Code\n - Frédéric Meynadier - Code\n - Georg Lukas - Code\n - Johannes Krude - Code\n - Łabędź - Code\n - Lasse Aagren - Code\n - Lancelot SIX - Code\n - Luke Marlin - Code\n - Maxime Buquet 
- Code\n - Nicolas Braud-Santoni - Code\n - Perdu - Code\n - Eijebong - Code\n - Gaëtan Ribémont - Logo design\n - Ovart - Testing\n - Koshie - Donation\n - Gapan - Makefile\n - FlashCode (weechat dev) - Useful advices on how to use ncurses efficiently\n - And all the people using and testing poezio, and especially the ones present\n on the jabber chatroom doing bug reports and/or feature requests.\n" }, { "alpha_fraction": 0.5064987540245056, "alphanum_fraction": 0.511939525604248, "avg_line_length": 31.119741439819336, "blob_id": "22b477ba71660206575bdc1a1571cf5f48806a76", "content_id": "579639e3e30031b44df9c16e1feeae61a2b0eb99", "detected_licenses": [ "CC-BY-2.0", "Zlib" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9938, "license_type": "permissive", "max_line_length": 79, "num_lines": 309, "path": "/poezio/logger.py", "repo_name": "hrnciar/poezio", "src_encoding": "UTF-8", "text": "# Copyright 2010-2011 Florent Le Coz <[email protected]>\n#\n# This file is part of Poezio.\n#\n# Poezio is free software: you can redistribute it and/or modify\n# it under the terms of the zlib license. See the COPYING file.\n\"\"\"\nThe logger module that handles logging of the poezio\nconversations and roster changes\n\"\"\"\n\nimport mmap\nimport re\nfrom typing import List, Dict, Optional, IO, Any\nfrom datetime import datetime\n\nfrom poezio import common\nfrom poezio.config import config\nfrom poezio.xhtml import clean_text\nfrom poezio.theming import dump_tuple, get_theme\n\nimport logging\n\nlog = logging.getLogger(__name__)\n\nfrom poezio.config import LOG_DIR as log_dir\n\nMESSAGE_LOG_RE = re.compile(r'^MR (\\d{4})(\\d{2})(\\d{2})T'\n r'(\\d{2}):(\\d{2}):(\\d{2})Z '\n r'(\\d+) <([^ ]+)>  (.*)$')\nINFO_LOG_RE = re.compile(r'^MI (\\d{4})(\\d{2})(\\d{2})T'\n r'(\\d{2}):(\\d{2}):(\\d{2})Z '\n r'(\\d+) (.*)$')\n\n\nclass LogItem:\n def __init__(self, year, month, day, hour, minute, second, nb_lines,\n message):\n self.time = datetime(\n int(year), int(month), int(day), int(hour), int(minute),\n int(second))\n self.nb_lines = int(nb_lines)\n self.text = message\n\n\nclass LogInfo(LogItem):\n def __init__(self, *args):\n LogItem.__init__(self, *args)\n\n\nclass LogMessage(LogItem):\n def __init__(self, year, month, day, hour, minute, seconds, nb_lines, nick,\n message):\n LogItem.__init__(self, year, month, day, hour, minute, seconds,\n nb_lines, message)\n self.nick = nick\n\n\ndef parse_log_line(msg: str, jid: str) -> Optional[LogItem]:\n match = re.match(MESSAGE_LOG_RE, msg)\n if match:\n return LogMessage(*match.groups())\n match = re.match(INFO_LOG_RE, msg)\n if match:\n return LogInfo(*match.groups())\n log.debug('Error while parsing %s’s logs: “%s”', jid, msg)\n return None\n\n\nclass Logger:\n \"\"\"\n Appends things to files. Error/information/warning logs\n and also log the conversations to logfiles\n \"\"\"\n\n def __init__(self):\n self._roster_logfile = None # Optional[IO[Any]]\n # a dict of 'groupchatname': file-object (opened)\n self._fds: Dict[str, IO[Any]] = {}\n\n def __del__(self):\n for opened_file in self._fds.values():\n if opened_file:\n try:\n opened_file.close()\n except: # Can't close? 
too bad\n pass\n\n def close(self, jid) -> None:\n jid = str(jid).replace('/', '\\\\')\n if jid in self._fds:\n self._fds[jid].close()\n log.debug('Log file for %s closed.', jid)\n del self._fds[jid]\n return None\n\n def reload_all(self) -> None:\n \"\"\"Close and reload all the file handles (on SIGHUP)\"\"\"\n for opened_file in self._fds.values():\n if opened_file:\n opened_file.close()\n log.debug('All log file handles closed')\n for room in self._fds:\n self._check_and_create_log_dir(room)\n log.debug('Log handle for %s re-created', room)\n return None\n\n def _check_and_create_log_dir(self, room: str,\n open_fd: bool = True) -> Optional[IO[Any]]:\n \"\"\"\n Check that the directory where we want to log the messages\n exists. if not, create it\n \"\"\"\n if not config.get_by_tabname('use_log', room):\n return None\n try:\n log_dir.mkdir(parents=True, exist_ok=True)\n except OSError as e:\n log.error('Unable to create the log dir', exc_info=True)\n except:\n log.error('Unable to create the log dir', exc_info=True)\n return None\n if not open_fd:\n return None\n filename = log_dir / room\n try:\n fd = filename.open('a', encoding='utf-8')\n self._fds[room] = fd\n return fd\n except IOError:\n log.error(\n 'Unable to open the log file (%s)', filename, exc_info=True)\n return None\n\n def log_message(self,\n jid: str,\n nick: str,\n msg: str,\n date: Optional[datetime] = None,\n typ: int = 1) -> bool:\n \"\"\"\n log the message in the appropriate jid's file\n type:\n 0 = Don’t log\n 1 = Message\n 2 = Status/whatever\n \"\"\"\n if not config.get_by_tabname('use_log', jid):\n return True\n logged_msg = build_log_message(nick, msg, date=date, typ=typ)\n if not logged_msg:\n return True\n jid = str(jid).replace('/', '\\\\')\n if jid in self._fds.keys():\n fd = self._fds[jid]\n else:\n option_fd = self._check_and_create_log_dir(jid)\n if option_fd is None:\n return True\n fd = option_fd\n filename = log_dir / jid\n try:\n fd.write(logged_msg)\n except OSError:\n log.error(\n 'Unable to write in the log file (%s)',\n filename,\n exc_info=True)\n return False\n else:\n try:\n fd.flush() # TODO do something better here?\n except OSError:\n log.error(\n 'Unable to flush the log file (%s)',\n filename,\n exc_info=True)\n return False\n return True\n\n def log_roster_change(self, jid: str, message: str) -> bool:\n \"\"\"\n Log a roster change\n \"\"\"\n if not config.get_by_tabname('use_log', jid):\n return True\n self._check_and_create_log_dir('', open_fd=False)\n filename = log_dir / 'roster.log'\n if not self._roster_logfile:\n try:\n self._roster_logfile = filename.open('a', encoding='utf-8')\n except IOError:\n log.error(\n 'Unable to create the log file (%s)',\n filename,\n exc_info=True)\n return False\n try:\n str_time = common.get_utc_time().strftime('%Y%m%dT%H:%M:%SZ')\n message = clean_text(message)\n lines = message.split('\\n')\n first_line = lines.pop(0)\n nb_lines = str(len(lines)).zfill(3)\n self._roster_logfile.write(\n 'MI %s %s %s %s\\n' % (str_time, nb_lines, jid, first_line))\n for line in lines:\n self._roster_logfile.write(' %s\\n' % line)\n self._roster_logfile.flush()\n except:\n log.error(\n 'Unable to write in the log file (%s)',\n filename,\n exc_info=True)\n return False\n return True\n\n\ndef build_log_message(nick: str,\n msg: str,\n date: Optional[datetime] = None,\n typ: int = 1) -> str:\n \"\"\"\n Create a log message from a nick, a message, optionally a date and type\n message types:\n 0 = Don’t log\n 1 = Message\n 2 = Status/whatever\n \"\"\"\n if not typ:\n return 
''\n\n msg = clean_text(msg)\n time = common.get_utc_time() if date is None else common.get_utc_time(date)\n str_time = time.strftime('%Y%m%dT%H:%M:%SZ')\n prefix = 'MR' if typ == 1 else 'MI'\n lines = msg.split('\\n')\n first_line = lines.pop(0)\n nb_lines = str(len(lines)).zfill(3)\n if nick:\n nick = '<' + nick + '>'\n logged_msg = '%s %s %s %s  %s\\n' % (prefix, str_time, nb_lines, nick,\n first_line)\n else:\n logged_msg = '%s %s %s %s\\n' % (prefix, str_time, nb_lines, first_line)\n return logged_msg + ''.join(' %s\\n' % line for line in lines)\n\n\ndef _get_lines_from_fd(fd: IO[Any], nb: int = 10) -> List[str]:\n \"\"\"\n Get the last log lines from a fileno\n \"\"\"\n with mmap.mmap(fd.fileno(), 0, prot=mmap.PROT_READ) as m:\n # start of messages begin with MI or MR, after a \\n\n pos = m.rfind(b\"\\nM\") + 1\n # number of message found so far\n count = 0\n while pos != 0 and count < nb - 1:\n count += 1\n pos = m.rfind(b\"\\nM\", 0, pos) + 1\n lines = m[pos:].decode(errors='replace').splitlines()\n return lines\n\n\ndef parse_log_lines(lines: List[str], jid: str) -> List[Dict[str, Any]]:\n \"\"\"\n Parse raw log lines into poezio log objects\n \"\"\"\n messages = []\n color = '\\x19%s}' % dump_tuple(get_theme().COLOR_LOG_MSG)\n\n # now convert that data into actual Message objects\n idx = 0\n while idx < len(lines):\n if lines[idx].startswith(' '): # should not happen ; skip\n idx += 1\n log.debug('fail?')\n continue\n log_item = parse_log_line(lines[idx], jid)\n idx += 1\n if not isinstance(log_item, LogItem):\n log.debug('wrong log format? %s', log_item)\n continue\n message_lines = []\n message = {\n 'history': True,\n 'time': common.get_local_time(log_item.time)\n }\n size = log_item.nb_lines\n if isinstance(log_item, LogInfo):\n message_lines.append(color + log_item.text)\n elif isinstance(log_item, LogMessage):\n message['nickname'] = log_item.nick\n message_lines.append(color + log_item.text)\n while size != 0 and idx < len(lines):\n message_lines.append(lines[idx][1:])\n size -= 1\n idx += 1\n message['txt'] = '\\n'.join(message_lines)\n messages.append(message)\n return messages\n\n\ndef create_logger() -> None:\n \"Create the global logger object\"\n global logger\n logger = Logger()\n\n\nlogger = None # type: Logger\n" } ]
7
isando3/B2GEfficiencyPlots
https://github.com/isando3/B2GEfficiencyPlots
b7e071c41943ccf60fb94db5a3de790b1a867ab7
8f8e32bdebd3efc7ffba2328fb9d34001ced4035
3d8bbe501d2640da0daf03f2829b0692a4ddf657
refs/heads/master
2021-01-19T00:47:32.764001
2016-01-20T20:44:43
2016-01-20T20:44:43
30,425,889
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5001327395439148, "alphanum_fraction": 0.6190602779388428, "avg_line_length": 27.111940383911133, "blob_id": "0046b67de4fa9763fc0ae9616a40951369c97bd1", "content_id": "60492e44112cf1f49e79bef20751f88c557e0972", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3767, "license_type": "no_license", "max_line_length": 216, "num_lines": 134, "path": "/makeEffplots.py", "repo_name": "isando3/B2GEfficiencyPlots", "src_encoding": "UTF-8", "text": "import sys\nimport re\nfrom ROOT import *\nfrom array import array\nimport tdrstyle, CMS_lumi\n\n#tdrstyle.setTDRStyle()\n\nH_ref = 600; \nW_ref = 800; \nW = W_ref\nH = H_ref\n\nf1 = TFile(sys.argv[1])\nsample = sys.argv[1].split('.root') #Zprime2\nf2 = TFile(sys.argv[2])\nsample2 = sys.argv[2].split('.root') #Zprime3\n\n\nhname = sys.argv[3]\nhn = 'h_'+hname\nhnum = hn+'num'\nhden = hn+'den'\nh_num = f1.Get(hnum)\nh_den = f1.Get(hden)\nh2_num = f2.Get(hnum)\nh2_den = f2.Get(hden)\n\n#c1 = TCanvas('c1',\"Plot\",1)\nT = 0.08*H_ref\nB = 0.12*H_ref \nL = 0.12*W_ref\nR = 0.04*W_ref\n\nc1 = TCanvas(\"c2\",\"c2\",50,50,W,H)\nc1.SetFillColor(0)\nc1.SetBorderMode(0)\nc1.SetFrameFillStyle(0)\nc1.SetFrameBorderMode(0)\nc1.SetLeftMargin( L/W )\nc1.SetRightMargin( R/W )\nc1.SetTopMargin( T/H )\nc1.SetBottomMargin( B/H )\nc1.SetTickx(0)\nc1.SetTicky(0)\nCMS_lumi.extraText = \"Preliminary\"\nif hname == 'epT':\n#if hname == 'leadingjetpT':\n c1.cd() \n gStyle.SetOptTitle(1)\n gStyle.SetOptStat(0)\n gStyle.SetTitleFontSize(0.1)\n c1.Modified()\n CMS_lumi.CMS_lumi(c1,4,11)\n c1.Update()\n #p = h_num.Rebin(3)\n #q = h_den.Rebin(3)\n x = array(\"d\",[0.,10.,20.,30.,40.,50.,60.,70.,80.,90.,100.,120.,140.,160.,180.,200.,250.,300.,350.,400.,450.,500.])\n #x = array(\"d\",[0.,10.,20.,30.,40.,50.,60.,70.,80.,90.,100.,120.,140.,160.,180.,200.,250.,300.,350.,400.,450.,500.,650.,700.,750.,800.,850.,900.,1000.,1100.,1200.,1300.,1400.,1500.,1600.,1700.,1800.,1900.,2000.])\n p = TH1F()\n q = TH1F()\n p2 = TH1F()\n q2 = TH1F()\n p = h_num.Rebin(21,\"p\",x)#21,38\n q = h_den.Rebin(21,\"q\",x)\n #p2 = h_num.Rebin(21,\"p2\",x)\n #q2 = h_den.Rebin(21,\"q2\",x)\n eff = TEfficiency(p,q)\n #eff2 = TEfficiency(p2,q2)\n eff.SetMarkerStyle(20)\n #eff2.SetMarkerStyle(20)\n #eff.SetMarkerColor(kBlue)\n #eff2.SetMarkerStyle(kRed)\n eff.SetTitle(\"ZPrimeToTTJets_M3000GeV_W30GeV; pT_{e} [GeV];Eff [HLTEle45CaloIdVTGsfTrkIdTPFJet200PFJet50v1]\")\n ##HOW TO DRAW Efficiency plots from a range 0 to 1 using TGraphAssymErrors, need to use .Paint() and .GetPaintedGraph() to then have access to .SetRangeUser()\n tgraph2 = TGraphAsymmErrors()\n gPad.Update()\n eff.Draw()\n eff.Paint(\"\")\n tgraph2 = eff.GetPaintedGraph()\n tgraph2.GetYaxis().SetRangeUser(0.,1.1)\n tgraph2.SetLineColor(0)\n tgraph2.SetMarkerStyle(20)\n#tgraph1.SetMinimum(0)\n tgraph2.Draw(\"P\") \n c1.Update()\n c1.SetGrid()\n gStyle.SetTitleFontSize(0.1)\n c1.Update()\n c1.Modified()\n CMS_lumi.CMS_lumi(c1,4,11)\n text = TLatex()\n text.SetNDC()\n text.DrawText(0.4,0.95,hname)\n c1.SaveAs('Efficiency_'+hname+'_'+sample[0]+'_'+sample2[0]+'.png')\nelif hname == 'mupT':\n c1.cd() \n gStyle.SetOptTitle(1)\n gStyle.SetOptStat(0)\n gStyle.SetTitleFontSize(0.1)\n c1.Modified()\n CMS_lumi.CMS_lumi(c1,4,11)\n c1.Update()\n #p = h_num.Rebin(3)\n #q = h_den.Rebin(3)\n x = array(\"d\",[0.,10.,20.,30.,40.,50.,60.,70.,80.,90.,100.,120.,140.,160.,180.,200.,250.,300.,350.,400.,450.,500.])\n p = TH1F()\n q = TH1F()\n p = h_num.Rebin(21,\"p\",x)\n q = h_den.Rebin(21,\"q\",x)\n 
eff = TEfficiency(p,q)\n eff.SetMarkerStyle(20)\n eff.SetTitle(\"ZPrimeToTTJets_M3000GeV_W30GeV; pT_{\\mu} [GeV];Eff [HLTMu40e2p1PFJet200PFJet50v1]\")\n #eff.Draw()\n tgraph2 = TGraphAsymmErrors()\n gPad.Update()\n eff.Draw()\n eff.Paint(\"\")\n tgraph2 = eff.GetPaintedGraph()\n tgraph2.GetYaxis().SetRangeUser(0.,1.1)\n tgraph2.SetLineColor(0)\n tgraph2.SetMarkerStyle(20)\n#tgraph1.SetMinimum(0)\n tgraph2.Draw(\"P\") \n c1.Update()\n c1.SetGrid()\n gStyle.SetTitleFontSize(0.1)\n c1.Update()\n c1.Modified()\n CMS_lumi.CMS_lumi(c1,4,11)\n text = TLatex()\n text.SetNDC()\n text.DrawText(0.4,0.95,hname)\n c1.SaveAs('Efficiency'+hname+sample[0]+'.png')\n" }, { "alpha_fraction": 0.6087534427642822, "alphanum_fraction": 0.6658915281295776, "avg_line_length": 28.65517234802246, "blob_id": "3f3d581ece61d4462f777a4c4d60c79ea84d0c13", "content_id": "5cec19662e5255d3bf9ef51c97508a5cbf896630", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12041, "license_type": "no_license", "max_line_length": 187, "num_lines": 406, "path": "/TagNProbeEfficiency.py", "repo_name": "isando3/B2GEfficiencyPlots", "src_encoding": "UTF-8", "text": "# python TagNProbeEfficiency.py --file=uhh2.AnalysisModuleRunner.MC.ZP3000w30.root --channel=e --tag=HLT_Mu45_eta2p1 --probe=HLT_Ele45_..._PFJet200PFJet50 --useOR=True --sample=ZP3000w30\n\n\n\nimport re\nimport sys\nfrom ROOT import *\nfrom array import array\nimport tdrstyle , CMS_lumi\n\nfrom optparse import OptionParser\nparser = OptionParser()\nimport glob\nimport os\n\n# _ __ __ _ _ __ ___ ___ _ __ \n#| '_ \\ / _` | '__/ __|/ _ \\ '__|\n#| |_) | (_| | | \\__ \\ __/ | \n#| .__/ \\__,_|_| |___/\\___|_| \n#| | \n#|_| \n#\nparser.add_option('--channel', type='string', action='store',\n default='e',\n dest='channel',\n help='mu or e?')\n\nparser.add_option('--tag', type='string', action='store',\n default='HLT_Mu45_eta2p1_v1',\n dest='tag',\n help='tag trigger')\n\nparser.add_option('--probe', type='string', action='store',\n default='HLT_Ele45PFJet200PFJet50',\n dest='probe',\n help='probe trigger')\n\nparser.add_option('--ORprobe', type='string', action='store',\n default='HLT_PFHT900_v1',\n dest='ORprobe',\n help='or probe trigger')\n\nparser.add_option('--useOR', type='string', action='store',\n default='False',\n dest='useOR',\n help='Use Or combination or trigger?')\n\nparser.add_option('--sample', type='string', action='store',\n default='TTbar',\n dest='sample',\n help='Sample used:')\n\nparser.add_option('--file', type='string', action='store',\n default='uhh.AnalysisRunner.TTbar.root',\n dest='file',\n help='File to be analyzed')\n\n\n\n(options, args) = parser.parse_args()\nargv = []\n\n\n\n\n#type here an example of how to use this script: \n\n#Setting up size of canvas\nH_ref = 600; \nW_ref = 800; \nW = W_ref\nH = H_ref\n#c1 = TCanvas('c1',\"Plot\",1)\nT = 0.08*H_ref\nB = 0.12*H_ref \nL = 0.12*W_ref\nR = 0.04*W_ref\nc1 = TCanvas(\"c1\",\"c1\",50,50,W,H)\nc1.SetFillColor(0)\nc1.SetBorderMode(0)\nc1.SetFrameFillStyle(0)\nc1.SetFrameBorderMode(0)\nc1.SetLeftMargin( L/W )\nc1.SetRightMargin( R/W )\nc1.SetTopMargin( T/H )\nc1.SetBottomMargin( B/H )\nc1.SetTickx(0)\nc1.SetTicky(0)\nCMS_lumi.extraText = \"Preliminary\"\n\n\n#file & input histograms\n\nf1 = TFile(options.file)\nfout = TFile(\"data.root\", \"recreate\")\nif options.channel == 'e':\n leptag_pt = f1.Get('trigtag/ele1__pt')\n leptag_eta= f1.Get('trigtag/ele1__eta')\n leptag_minDR_jet = f1.Get('trigtag/ele1__minDR_jet')\n lepprobe_pt = 
f1.Get('trigprobe/ele1__pt')\n lepprobe_eta= f1.Get('trigprobe/ele1__eta')\n lepprobe_minDR_jet =f1.Get('trigprobe/ele1__minDR_jet')\nelif options.channel == 'mu':\n leptag_pt = f1.Get('trigtag/muo1__pt')\n leptag_eta= f1.Get('trigtag/muo1__eta')\n leptag_minDR_jet = f1.Get('trigtag/muo1__minDR_jet')\n lepprobe_pt = f1.Get('trigprobe/muo1__pt')\n lepprobe_eta= f1.Get('trigprobe/muo1__eta')\n lepprobe_minDR_jet =f1.Get('trigprobe/muo1__minDR_jet')\njet1tag_pt=f1.Get('trigtag/jet1__pt')\njet1tag_eta=f1.Get('trigtag/jet1__eta')\njet2tag_pt=f1.Get('trigtag/jet2__pt')\njet2tag_eta = f1.Get('trigtag/jet2__eta')\njet1probe_pt=f1.Get('trigprobe/jet1__pt')\njet1probe_eta=f1.Get('trigprobe/jet1__eta')\njet2probe_pt=f1.Get('trigprobe/jet2__pt')\njet2probe_eta =f1.Get('trigprobe/jet2__eta')\n\n## Update canvas \ngROOT.SetBatch(kTRUE)\nc1.cd()\ngStyle.SetOptTitle(0)\ngStyle.SetOptStat(0)\ngStyle.SetTitleFontSize(0.1)\nc1.Modified()\nCMS_lumi.CMS_lumi(c1,4,11)\nc1.Update()\nuseOR = options.useOR\np = TH1F()\nq = TH1F()\n#lep pt \nx= array(\"d\",[0.,25.,50.,75.,100.,125.,150.,200.,300.,900.])\np=lepprobe_pt.Rebin(9,\"lepprobe_pt\",x)\nq=leptag_pt.Rebin(9,\"leptag_pt\",x)\neff_lep_pt = TEfficiency(p,q)\neff_lep_pt.SetMarkerStyle(20)\nif useOR =='True':\n title_lep_pt = 'TagNProbe; pT_{'+options.channel+'}[GeV]; Eff['+options.probe+'||'+options.ORprobe+']'\nelse:\n title_lep_pt = 'TagNProbe; pT_{'+options.channel+'}[GeV]; Eff['+options.probe+']'\neff_lep_pt.SetTitle(title_lep_pt)\ntgraph2 = TGraphAsymmErrors()\neff_lep_pt.Draw()\neff_lep_pt.Paint(\"\")\ntgraph2 = eff_lep_pt.GetPaintedGraph()\ntgraph2.GetYaxis().SetRangeUser(0.,1.1)\ntgraph2.SetLineColor(0)\ntgraph2.SetMarkerStyle(20)\nfit = tgraph2.Fit(\"pol0\",'S')\nvalue = fit.Parameter(0)\nerror = fit.ParError(0)\n#print value, error\ntgraph2.Draw(\"P\")\nfout.WriteObject(tgraph2,\"ele_pt\")\nc1.Update()\nc1.SetGrid()\ngStyle.SetTitleFontSize(0.1)\nc1.Update()\nc1.Modified()\nCMS_lumi.CMS_lumi(c1,4,11)\ntext = TLatex()\ntext.SetNDC()\nuppertitle = 'Tag-And-Probe('+ options.sample+')'\ntext.DrawText(0.3,0.95,uppertitle)\ntext2 = TLatex()\ntext2.SetNDC()\ntext2.SetTextSize(0.04)\ntext2.DrawText(0.2,0.45,'Eff:'+str(\"{0:.4f}\".format(value))+'+/-'+str(\"{0:.4f}\".format(error)))\nssleppt = 'TagNProbe_'+ options.sample+ '_' + options.channel + '_pt.png'\nc1.SaveAs(ssleppt)\nc1.Clear()\n#ele eta\nc1.cd()\nlepprobe_eta.Rebin(2)\nleptag_eta.Rebin(2)\neff_lep_eta = TEfficiency(lepprobe_eta,leptag_eta)\neff_lep_eta.SetMarkerStyle(20)\nif useOR=='True':\n title_lep_eta = 'TagNProbe; \\eta_{'+options.channel+'}; Eff['+options.probe+'||'+options.ORprobe+']'\nelse:\n title_lep_eta = 'TagNProbe; \\eta_{'+options.channel+'}; Eff['+options.probe+']'\neff_lep_eta.SetTitle(title_lep_eta)\ntgraph2 = TGraphAsymmErrors()\neff_lep_eta.Draw()\neff_lep_eta.Paint(\"\")\ntgraph2 = eff_lep_eta.GetPaintedGraph()\ntgraph2.GetYaxis().SetRangeUser(0.,1.1)\ntgraph2.SetLineColor(0)\ntgraph2.SetMarkerStyle(20)\nfit = tgraph2.Fit(\"pol0\",'S')\nvalue = fit.Parameter(0)\nerror = fit.ParError(0)\ntgraph2.Draw(\"P\") \nc1.Update()\nc1.SetGrid()\ngStyle.SetTitleFontSize(0.1)\nc1.Update()\nc1.Modified()\nCMS_lumi.CMS_lumi(c1,4,11)\ntext = TLatex()\ntext.SetNDC()\ntext.DrawText(0.3,0.95,uppertitle)\ntext2 = TLatex()\ntext2.SetNDC()\ntext2.SetTextSize(0.04)\ntext2.DrawText(0.2,0.45,'Eff:'+str(\"{0:.4f}\".format(value))+'+/-'+str(\"{0:.4f}\".format(error)))\nsslepeta = 'TagNProbe_'+ options.sample+ '_' + options.channel + '_eta.png'\nc1.SaveAs(sslepeta)\nc1.Clear()\n#ele 
minDR\nc1.cd()\nleptag_minDR_jet.Rebin(2)\nlepprobe_minDR_jet.Rebin(2)\neff_lep_minDR = TEfficiency(lepprobe_minDR_jet,leptag_minDR_jet)\neff_lep_minDR.SetMarkerStyle(20)\nif useOR =='True':\n title_lep_minDR = 'TagNProbe; min \\Delta R_{'+options.channel+'-jet}; Eff['+options.probe+'||'+options.ORprobe+']'\nelse:\n title_lep_minDR = 'TagNProbe; min \\Delta R_{'+options.channel+'-jet}; Eff['+options.probe+']'\neff_lep_minDR.SetTitle(title_lep_minDR)\ntgraph2 = TGraphAsymmErrors()\neff_lep_minDR.Draw()\neff_lep_minDR.Paint(\"\")\ntgraph2 = eff_lep_minDR.GetPaintedGraph()\ntgraph2.GetYaxis().SetRangeUser(0.,1.1)\ntgraph2.SetLineColor(0)\ntgraph2.SetMarkerStyle(20)\nfit = tgraph2.Fit(\"pol0\",'S')\nvalue = fit.Parameter(0)\nerror = fit.ParError(0)\ntgraph2.Draw(\"P\") \nfout.WriteObject(tgraph2,\"minDR\")\nc1.Update()\nc1.SetGrid()\ngStyle.SetTitleFontSize(0.1)\nc1.Update()\nc1.Modified()\nCMS_lumi.CMS_lumi(c1,4,11)\ntext = TLatex()\ntext.SetNDC()\ntext.DrawText(0.3,0.95,uppertitle)\ntext2 = TLatex()\ntext2.SetNDC()\ntext2.SetTextSize(0.04)\ntext2.DrawText(0.2,0.45,'Eff:'+str(\"{0:.4f}\".format(value))+'+/-'+str(\"{0:.4f}\".format(error)))\nsslepDR = 'TagNProbe_'+ options.sample+ '_' + options.channel + '_minDR.png'\nc1.SaveAs(sslepDR)\nc1.Clear()\n#jet1_pt\nj1probe = TH1F()\nj1tag = TH1F()\nj1pt = array(\"d\",[250.,350.,450.,650.,1000.])\nj1probe = jet1probe_pt.Rebin(4,\"j1probe\",j1pt)\nj1tag = jet1tag_pt.Rebin(4,\"j1tag\",j1pt)\neff_jet1_pt = TEfficiency(j1probe,j1tag)\neff_jet1_pt.SetMarkerStyle(20)\nif useOR =='True':\n title_jet1pt = 'TagNProbe; pT_{jet1}[GeV]; Eff['+options.probe+'||'+options.ORprobe+']'\nelse:\n title_jet1pt = 'TagNProbe; pT_{jet}[GeV]; Eff['+options.probe+']'\neff_jet1_pt.SetTitle(title_jet1pt)\ntgraph2 = TGraphAsymmErrors()\neff_jet1_pt.Draw()\neff_jet1_pt.Paint(\"\")\ntgraph2 = eff_jet1_pt.GetPaintedGraph()\ntgraph2.GetYaxis().SetRangeUser(0.,1.1)\ntgraph2.SetLineColor(0)\ntgraph2.SetMarkerStyle(20)\nfit = tgraph2.Fit(\"pol0\",'S')\nvalue = fit.Parameter(0)\nerror = fit.ParError(0)\ntgraph2.Draw(\"P\") \nfout.WriteObject(tgraph2,\"jet1pt\")\nc1.Update()\nc1.SetGrid()\ngStyle.SetTitleFontSize(0.1)\nc1.Update()\nc1.Modified()\nCMS_lumi.CMS_lumi(c1,4,11)\ntext = TLatex()\ntext.SetNDC()\ntext.DrawText(0.3,0.95,uppertitle)\ntext2 = TLatex()\ntext2.SetNDC()\ntext2.SetTextSize(0.04)\ntext2.DrawText(0.2,0.45,'Eff:'+str(\"{0:.4f}\".format(value))+'+/-'+str(\"{0:.4f}\".format(error)))\nssjet1pt = 'TagNProbe_'+ options.sample+ '_' + options.channel + '_jet1pt.png'\nc1.SaveAs(ssjet1pt)\nc1.Clear()\n#jet1_eta\njet1probe_eta.Rebin(3)\njet1tag_eta.Rebin(3)\neff_jet1_eta = TEfficiency(jet1probe_eta,jet1tag_eta)\neff_jet1_eta.SetMarkerStyle(20)\nif useOR == 'True':\n title_jet1eta = 'TagNProbe; \\eta_{jet1}; Eff['+options.probe+'||'+options.ORprobe+']'\nelse:\n title_jet1eta = 'TagNProbe; \\eta_{jet}; Eff['+options.probe+']'\neff_jet1_eta.SetTitle(title_jet1eta)\ntgraph2 = TGraphAsymmErrors()\neff_jet1_eta.Draw()\neff_jet1_eta.Paint(\"\")\ntgraph2 = eff_jet1_eta.GetPaintedGraph()\ntgraph2.GetYaxis().SetRangeUser(0.,1.1)\ntgraph2.SetLineColor(0)\ntgraph2.SetMarkerStyle(20)\nfit = tgraph2.Fit(\"pol0\",'S')\nvalue = fit.Parameter(0)\nerror = fit.ParError(0)\ntgraph2.Draw(\"P\") \nc1.Update()\nc1.SetGrid()\ngStyle.SetTitleFontSize(0.1)\nc1.Update()\nc1.Modified()\nCMS_lumi.CMS_lumi(c1,4,11)\ntext = TLatex()\ntext.SetNDC()\ntext.DrawText(0.3,0.95, uppertitle)\ntext2 = 
TLatex()\ntext2.SetNDC()\ntext2.SetTextSize(0.04)\ntext2.DrawText(0.2,0.45,'Eff:'+str(\"{0:.4f}\".format(value))+'+/-'+str(\"{0:.4f}\".format(error)))\nssjet1eta = 'TagNProbe_'+ options.sample+ '_' + options.channel + '_jet1eta.png'\nc1.SaveAs(ssjet1eta)\nc1.Clear()\n#jet2_pt\n#jet2probe_pt.Rebin(3)\n#jet2tag_pt.Rebin(3)\nj2pt = array(\"d\",[75.,100.,150.,250.,350.,450.,650.,1000.])\nj2probe = jet2probe_pt.Rebin(7,\"j2probe\",j2pt)\nj2tag = jet2tag_pt.Rebin(7,\"j2tag\",j2pt)\neff_jet2_pt = TEfficiency(j2probe,j2tag)\neff_jet2_pt.SetMarkerStyle(20)\nif useOR == 'True':\n title_jet2pt = 'TagNProbe; pT_{jet1}[GeV]; Eff['+options.probe+'||'+options.ORprobe+']'\nelse:\n title_jet2pt = 'TagNProbe; pT_{jet2} [GeV]; Eff['+options.probe+']'\neff_jet2_pt.SetTitle(title_jet2pt)\ntgraph2 = TGraphAsymmErrors()\neff_jet2_pt.Draw()\neff_jet2_pt.Paint(\"\")\ntgraph2 = eff_jet2_pt.GetPaintedGraph()\ntgraph2.GetYaxis().SetRangeUser(0.,1.1)\ntgraph2.SetLineColor(0)\ntgraph2.SetMarkerStyle(20)\nfit = tgraph2.Fit(\"pol0\",'S')\nvalue = fit.Parameter(0)\nerror = fit.ParError(0)\ntgraph2.Draw(\"P\") \nfout.WriteObject(tgraph2,\"jet2pt\")\nc1.Update()\nc1.SetGrid()\ngStyle.SetTitleFontSize(0.1)\nc1.Update()\nc1.Modified()\nCMS_lumi.CMS_lumi(c1,4,11)\ntext = TLatex()\ntext.SetNDC()\ntext.DrawText(0.3,0.95,uppertitle)\ntext2 = TLatex()\ntext2.SetNDC()\ntext2.SetTextSize(0.04)\ntext2.DrawText(0.2,0.45,'Eff:'+str(\"{0:.4f}\".format(value))+'+/-'+str(\"{0:.4f}\".format(error)))\nssjet2pt = 'TagNProbe_'+ options.sample+ '_' + options.channel + '_jet2pt.png'\nc1.SaveAs(ssjet2pt)\nc1.Clear()\n#jet2_eta\njet2probe_eta.Rebin(3)\njet2tag_eta.Rebin(3)\neff_jet2_eta = TEfficiency(jet2probe_eta,jet2tag_eta)\neff_jet2_eta.SetMarkerStyle(20)\nif useOR == 'True':\n title_jet2eta = 'TagNProbe; \\eta_{jet1}; Eff['+options.probe+'||'+options.ORprobe+']'\nelse:\n title_jet2eta = 'TagNProbe; \\eta_{jet2}; Eff['+options.probe+']'\neff_jet2_eta.SetTitle(title_jet2eta)\ntgraph2 = TGraphAsymmErrors()\neff_jet2_eta.Draw()\neff_jet2_eta.Paint(\"\")\ntgraph2 = eff_jet2_eta.GetPaintedGraph()\ntgraph2.GetYaxis().SetRangeUser(0.,1.1)\ntgraph2.SetLineColor(0)\ntgraph2.SetMarkerStyle(20)\nfit = tgraph2.Fit(\"pol0\",'S')\nvalue = fit.Parameter(0)\nerror = fit.ParError(0)\ntgraph2.Draw(\"P\") \nfout.WriteObject(tgraph2,\"jet2eta\")\nfout.Close()\nc1.Update()\nc1.SetGrid()\ngStyle.SetTitleFontSize(0.1)\nc1.Update()\nc1.Modified()\nCMS_lumi.CMS_lumi(c1,4,11)\ntext = TLatex()\ntext.SetNDC()\ntext.DrawText(0.3,0.95,uppertitle)\ntext2 = TLatex()\ntext2.SetNDC()\ntext2.SetTextSize(0.04)\ntext2.DrawText(0.2,0.45,'Eff:'+str(\"{0:.4f}\".format(value))+'+/-'+str(\"{0:.4f}\".format(error)))\nssjet2eta = 'TagNProbe_'+ options.sample+ '_' + options.channel + '_jet2eta.png'\nc1.SaveAs(ssjet2eta)\nc1.Clear()\n\n" }, { "alpha_fraction": 0.5633116960525513, "alphanum_fraction": 0.6390692591667175, "avg_line_length": 30.86206817626953, "blob_id": "3c2d2e8c6182fbda57f9595de775f09db005b7b1", "content_id": "f9ee1158e6b78257254c7637a7afe7cabbee9e68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1848, "license_type": "no_license", "max_line_length": 122, "num_lines": 58, "path": "/getscalefactor.py", "repo_name": "isando3/B2GEfficiencyPlots", "src_encoding": "UTF-8", "text": "import ROOT as R\nimport sys\nfrom math import sqrt, pow\nfrom array import array\n\n#R.SetOwnership(central, False)\n\nf1 = R.TFile(sys.argv[1],'r')#data\nf2 = R.TFile(sys.argv[2],'r')#MC\ngraph1 = f1.Get(sys.argv[3])\ngraph2 = 
f2.Get(sys.argv[4])\nfout = R.TFile(sys.argv[5], 'recreate')\n\nn = graph1.GetN()\nx1, y1, yerr_up1, yerr_down1, x2, y2, yerr_up2, yerr_down2, xerr_left, xerr_right = [], [], [], [], [], [], [],[], [], []\nx12, y12 , yerr_up12, yerr_down12, xerr_left12, xerr_right12 = [],[],[],[],[],[]\nfor i in range(n):\n tmpX1, tmpY1, tmpX2, tmpY2 = R.Double(0), R.Double(0), R.Double(0), R.Double(0)\n graph1.GetPoint(i, tmpX1, tmpY1)\n graph2.GetPoint(i, tmpX2, tmpY2)\n x1.append(tmpX1)\n x2.append(tmpX2)\n y1.append(tmpY1)\n yerr_up1.append(graph1.GetErrorYhigh(i))\n yerr_down1.append(graph1.GetErrorYlow(i))\n y2.append(tmpY2)\n yerr_up2.append(graph2.GetErrorYhigh(i))\n yerr_down2.append(graph2.GetErrorYlow(i))\n xerr_left.append(graph2.GetErrorXlow(i))\n xerr_right.append(graph2.GetErrorXhigh(i))\nprint len(x1)\nprint y1[0]\nprint y2[0]\ny12=[0.]*len(x1)\nyerr_down12 = [0.]*len(x1)\nyerr_up12 = [0.]*len(x1)\n\nfor j in xrange(len(x1)):\n y12[j] = y1[j]/y2[j]\n yerr_down12[j]= sqrt(pow(y12[j],2)*(pow(yerr_down1[j]/y1[j], 2)+pow(yerr_down2[j]/y2[j],2)))\n yerr_up12[j]= sqrt(pow(y12[j],2)*(pow(yerr_up1[j]/y1[j], 2)+pow(yerr_up2[j]/y2[j],2)))\n #x12[j] = x1[j]\n #xerr_left12 = xerr_left[j]\n #xerr_right12 = xerr_right[j]\n\nax1 = array(\"d\",x1)\nay12 = array(\"d\",y12)\naxel = array(\"d\",xerr_left)\naxer = array(\"d\", xerr_right)\nayd12 = array(\"d\", yerr_down12)\nayu12 = array(\"d\", yerr_up12)\nsf = R.TGraphAsymmErrors(len(x1),ax1,ay12,axel,axer,ayd12,ayu12)\nsf.SetTitle(\"Trigger Scale Factor\")\nsf.SetMarkerColor(4)\nsf.SetMarkerStyle(21)\nsf.Draw(\"ALP\")\nfout.WriteTObject(sf, \"ScaleFactor\")\nfout.Close\n" }, { "alpha_fraction": 0.5895611047744751, "alphanum_fraction": 0.6678528785705566, "avg_line_length": 34.125, "blob_id": "62aace018d5fbf458aea33b49939a4db41e821ee", "content_id": "a560693cb8be3d9e53986ae048406304767e6010", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 843, "license_type": "no_license", "max_line_length": 119, "num_lines": 24, "path": "/FitConfLevel.C", "repo_name": "isando3/B2GEfficiencyPlots", "src_encoding": "UTF-8", "text": "TFile * f = TFile::Open(\"scalefactor_j1pt.root\");\nTGraphAsymmErrors *g = (TGraphAsymmErrors*)f->Get(\"ScaleFactor\");\nTF1 *ff = new TF1(\"ff\",\"pol0\");\ng->Fit(\"ff\");\nTGraphErrors *grint = new TGraphErrors(4);\nfor (i=0; i<4; i++){ grint->SetPoint(i, g->GetX()[i], 0);(TVirtualFitter::GetFitter())->GetConfidenceIntervals(grint);}\ngrint->SetFillColor(kRed);\ngrint->SetFillStyle(3018);\ngrint->GetYaxis()->SetRangeUser(0.0,1.15);\ngrint->GetXaxis()->SetRangeUser(0.,900.);\nTCanvas * c1 = new TCanvas(\"c1\",\"c1\");\nc1->cd();\ngStyle->SetOptStat(1111111);\ngStyle->SetStatY(0.5); \ngStyle->SetStatX(0.5);\ngrint->Draw(\"a3\");\nc1->SetGridx();\nc1->SetGridy();\ng->Draw(\"same\");\nTFile out_file(\"scalefactor_fit.root\", \"RECREATE\");\nff.Write();\nout_file.Close();\n//TF1 *ff = new TF1(\"ff\",Erf_twosteps,250,1000,7);\n//ff->SetParameters(1.,0.05,0.2,400.,0.05,0.1,800.);\n" } ]
4
devmapal/gitolite-manager
https://github.com/devmapal/gitolite-manager
1aaeec73a6653388d2f5b54da8143d597363fac3
4a185f3101a176cdc6b350cb9e0944fb58f6321b
3b4c70de18d4f3d9994025cce5486d70caf14a1f
refs/heads/master
2021-01-14T12:57:26.351698
2015-04-09T12:52:51
2015-04-09T12:52:51
28,190,728
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.6033287048339844, "alphanum_fraction": 0.6033287048339844, "avg_line_length": 18.486486434936523, "blob_id": "f38340e1a6f219a198f56ff36eb8913b5f2b6cac", "content_id": "c27e5cddf95ba9e6609fcd3a3a24fb18ce5e0c41", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 721, "license_type": "permissive", "max_line_length": 51, "num_lines": 37, "path": "/tests/gitolite.py", "repo_name": "devmapal/gitolite-manager", "src_encoding": "UTF-8", "text": "import unittest\n\nimport gitolite_manager\n\n\nclass ConfigFileTestCase(unittest.TestCase):\n def setUp(self):\n self.gitolite = gitolite_manager.Gitolite()\n\n def tearDown(self):\n pass\n\n def test_add_repo(self):\n self.assertEqual('','')\n\n def test_rm_repo(self):\n self.assertEqual('','')\n\n def test_get_repo(self):\n self.assertEqual('','')\n\n\nclass SSHKeyTestCase(unittest.TestCase):\n def setUp(self):\n self.gitolite = gitolite_manager.Gitolite()\n\n def tearDown(self):\n pass\n\n def test_add_key(self):\n self.assertEqual('','')\n\n def test_rm_key(self):\n self.assertEqual('','')\n\n def test_get_keys(self):\n self.assertEqual('','')\n" }, { "alpha_fraction": 0.6229867935180664, "alphanum_fraction": 0.6281113028526306, "avg_line_length": 30.76744270324707, "blob_id": "cadf9fd9e2e796fdfbf48cf76e0ba21ee3f3dc56", "content_id": "534dd8ceab5604bb27b7d26819591bec4f64a797", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1366, "license_type": "permissive", "max_line_length": 73, "num_lines": 43, "path": "/setup.py", "repo_name": "devmapal/gitolite-manager", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport os\n\nfrom gitolite_manager import __version__\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nf = open(os.path.join(os.path.dirname(__file__), 'README.md'))\nlong_description = f.read()\nf.close()\n\nsetup(\n name='gitolite-manager',\n version=__version__,\n description=\"Manage gitolite's configuration files and ssh keys\",\n long_description=long_description,\n url='https://github.com/smailq/gitolite-manager',\n download_url=('https://github.com/downloads/smailq/gitolite-manager/'\n 'gitolite-manager-%s.tar.gz' % __version__),\n author='Kyu Lee',\n author_email='[email protected]',\n maintainer='Kyu Lee',\n maintainer_email='[email protected]',\n keywords=['Gitolite', 'gitolite management'],\n license='MIT',\n packages=['gitolite_manager'],\n test_suite='tests.all_tests',\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.5',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Utilities'\n ]\n)\n" }, { "alpha_fraction": 0.5119975805282593, "alphanum_fraction": 0.5139471888542175, "avg_line_length": 26.44032859802246, "blob_id": "2d4749dede1eaadc49b4709c728119548dd36737", "content_id": "9bbdaa4abcf605e073aad4c56a85436a47efe73a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6668, "license_type": "permissive", "max_line_length": 85, "num_lines": 243, "path": "/gitolite_manager/gitolite.py", "repo_name": "devmapal/gitolite-manager", "src_encoding": "UTF-8", "text": "import os, 
glob, tempfile, shutil, re\n\n\nclass Gitolite(object):\n\n def __init__(self, path='./gitolite-admin'):\n self._repo_path = path\n self._user_repo_config = path + \"/conf/user_repos.conf\"\n self._gitolite_config = path + \"/conf/gitolite.conf\"\n self._key_path = path + \"/keydir/\"\n\n self._slaves_string = None\n gitolite_admin_conf_file = open(self._gitolite_config, \"r\")\n for line in gitolite_admin_conf_file:\n if re.match(\"option mirror.slaves\", line):\n self._slaves_string = line.split(\"=\")[1]\n gitolite_admin_conf_file.close()\n break\n\n self._repo_data = self.__load_repo()\n\n def save_repo(self):\n self.__save_repo(self._repo_data)\n\n def addRepo(self, username, reponame, add_user=True):\n \"\"\"\n Adds a new repo to gitolite.\n returns true iff successfully added repo to config\n \"\"\"\n\n repo_data = self.__load_repo()\n\n repo = username + '/' + reponame\n if repo in repo_data:\n return False\n\n repo_data[repo] = []\n if add_user:\n repo_data[repo].append(( 'RW+', username ))\n\n self.__save_repo(repo_data)\n\n return True\n\n def addUserToRepo(self, username, reponame, user, permission):\n \"\"\"\n Adds 'user' withth 'permission' to 'reponame' of 'username' to config\n returns true iff successfully added users permission\n \"\"\"\n repo_data = self.__load_repo()\n\n repo = username + '/' + reponame\n if repo not in repo_data:\n return False\n\n for i, (_, existing_user) in enumerate(repo_data[repo]):\n if existing_user == user:\n repo_data[repo][i] = (permission, user)\n break\n else:\n repo_data[repo].append((permission, user))\n\n self.__save_repo(repo_data)\n\n return True\n\n def removeUserFromRepo(self, username, reponame, user):\n \"\"\"\n Removes 'user' from 'reponame' of 'username' from config.\n \"\"\"\n repo_data = self.__load_repo()\n\n repo = username + '/' + reponame\n if repo not in repo_data:\n return False\n\n to_remove = []\n for i, (_, existing_user) in enumerate(repo_data[repo]):\n if existing_user == user:\n to_remove.append(i)\n\n for i in to_remove:\n del repo_data[repo][i]\n\n self.__save_repo(repo_data)\n\n return True\n\n def rmRepo(self, username, reponame):\n \"\"\"\n Removes a repo\n returns true iff successfully removed repo from config.\n \"\"\"\n\n repo_data = self.__load_repo()\n\n repo = username + '/' + reponame\n\n if repo not in repo_data:\n return False\n\n del repo_data[repo]\n\n self.__save_repo(repo_data)\n\n return True\n\n def getRepos(self):\n return self.__load_repo()\n\n\n def addSSHKey(self, username, keyname, sshkey):\n\n key_file_name = self.__get_ssh_key_path(username, keyname)\n\n try:\n with open(key_file_name) as f:\n return False\n except IOError as e:\n pass\n\n new_key_file = open(key_file_name, 'w')\n new_key_file.write(sshkey)\n new_key_file.close()\n\n return True\n\n def rmSSHKey(self, username, keyname):\n\n key_file_name = self.__get_ssh_key_path(username, keyname)\n\n try:\n os.remove(key_file_name)\n except:\n return False\n\n return True\n\n def getSSHKeys(self):\n\n keys = glob.glob(self._key_path + '*@*.pub')\n\n key_data = {}\n\n for keyfile in keys:\n filename = os.path.basename(keyfile)[:-4]\n filename_split = filename.split('@',1)\n\n if len(filename_split) != 2:\n raise SyntaxError('Invalid key file name')\n\n username = filename_split[0].strip()\n keyname = filename_split[1].strip()\n\n if username not in key_data:\n key_data[username] = []\n\n key_data[username].append(keyname)\n\n return key_data\n\n def __get_ssh_key_path(self, username, keyname):\n return self._key_path + username + \"@\" + 
keyname + \".pub\"\n\n def __load_repo(self):\n \"\"\"\n Read gitolite config file\n \"\"\"\n\n repo_data = {}\n\n #repo [username]/[reponame]\n # RW+ = [username]\n\n repo_file_content = open(self._user_repo_config, 'r')\n\n line = repo_file_content.readline().strip()\n repo = ''\n\n while line != '':\n\n if line == '\\n':\n # Consume empty lines.\n line = repo_file_content.readline()\n continue\n\n if line.startswith('repo'):\n line_split = line.split(None, 1)\n if len(line_split) != 2:\n raise SyntaxError('Invalid repository def.')\n repo = line_split[1].strip()\n repo_data[repo] = []\n elif line.startswith(' '):\n if repo == '':\n raise SyntaxError('Missing repo def.')\n\n line_split = line.split('=', 1)\n if len(line_split) != 2:\n raise SyntaxError('Invalid rule')\n\n perm = line_split[0].strip()\n user = line_split[1].strip()\n\n if repo not in repo_data:\n repo_data[repo] = []\n\n repo_data[repo].append( ( perm, user) )\n elif line.startswith(\"option\"):\n # Gitolite mirroring options\n if repo == '':\n raise SyntaxError('Missing repo def.')\n else:\n pass\n else:\n raise SyntaxError('Invalid line: ' + line)\n\n line = repo_file_content.readline()\n\n repo_file_content.close()\n\n return repo_data\n\n def __save_repo(self, repo_data):\n \"\"\"\n Write gitolite config file\n \"\"\"\n\n\n tmp_file = tempfile.NamedTemporaryFile('w')\n\n for reponame, permlist in repo_data.items():\n tmp_file.write('repo ' + reponame + '\\n')\n for perm, user in permlist:\n tmp_file.write(\" \" + perm + \" = \" + user + '\\n')\n\n # Adds mirroring options\n if self._slaves_string is not None:\n tmp_file.write('option mirror.master = gitolite-master\\n')\n tmp_file.write('option mirror.slaves =' + self._slaves_string + '\\n')\n\n tmp_file.flush()\n shutil.copyfile(tmp_file.name, self._user_repo_config)\n tmp_file.close()\n" }, { "alpha_fraction": 0.7929824590682983, "alphanum_fraction": 0.7929824590682983, "avg_line_length": 27.5, "blob_id": "5ceb54969e853782a97cab614f84ed5b973d4e32", "content_id": "d99523d8c2e1788efeea16f6bb89c4f42a770123", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 285, "license_type": "permissive", "max_line_length": 57, "num_lines": 10, "path": "/tests/__init__.py", "repo_name": "devmapal/gitolite-manager", "src_encoding": "UTF-8", "text": "import unittest\n\nfrom tests.gitolite import ConfigFileTestCase\nfrom tests.gitolite import SSHKeyTestCase\n\ndef all_tests():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(ConfigFileTestCase))\n suite.addTest(unittest.makeSuite(SSHKeyTestCase))\n return suite\n" }, { "alpha_fraction": 0.5692307949066162, "alphanum_fraction": 0.5923076868057251, "avg_line_length": 17.571428298950195, "blob_id": "916e90005e4bb0578477d93ba6fd33ed29e9a0c8", "content_id": "6aa0ec6539eaac977fbc272712d1578ceff28ec3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 130, "license_type": "permissive", "max_line_length": 49, "num_lines": 7, "path": "/gitolite_manager/__init__.py", "repo_name": "devmapal/gitolite-manager", "src_encoding": "UTF-8", "text": "from gitolite import Gitolite\n\n__version__ = '0.0.0'\n\nVERSION = tuple(map(int, __version__.split('.')))\n\n__all__ = [ 'Gitolite' ]\n" }, { "alpha_fraction": 0.6546593308448792, "alphanum_fraction": 0.6546593308448792, "avg_line_length": 21.38596534729004, "blob_id": "ef7f74a4eb7e1c00f524ef483355f6bfc233053f", "content_id": 
"a797638a41ea26910535f020ae1f7ecd678cc566", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1277, "license_type": "permissive", "max_line_length": 91, "num_lines": 57, "path": "/README.md", "repo_name": "devmapal/gitolite-manager", "src_encoding": "UTF-8", "text": "Gitolite Manager\n================\n\nManage [gitolite](https://github.com/sitaramc/gitolite) config and ssh keys.\n\n*Currently in pre-alpha, not polished at all!*\n\n\n## Installation\n\n $ sudo pip install gitolite-manager\n\nor alternatively (you really should be using pip though):\n\n $ sudo easy_install gitolite-manager\n\nFrom source:\n\n $ sudo python setup.py install\n\n## Getting Started\n\n### Prepare gitolite\n\nAdd the following line to gitolite configuration file (./gitolite-admin/conf/gitolite.conf)\n\n include \"user_repos.conf\"\n\nBy default, it'll use `./gitolite-admin` as gitolite directory.\n\n### Add/remove repositories\n\n >>> import gitolite_manager\n >>> gitolite = gitolite_manager.Gitolite()\n >>> gitolite.addRepo('username', 'reponame')\n True\n >>> gitolite.getRepos()\n {'username/reponame': [('RW+', 'username')]}\n >>> gitolite.rmRepo('username', 'reponame')\n True\n >>> gitolite.getRepos()\n {}\n\n\n\n### Add/remove ssh keys\n\n >>> import gitolite_manager\n >>> gitolite = gitolite_manager.Gitolite()\n >>> gitolite.addSSHKey('username', 'keyname', 'ssh key content')\n True\n >>> gitolite.getSSHKeys()\n {'username': ['keyname']}\n >>> gitolite.rmSSHKey('username','keyname')\n True\n >>> gitolite.getSSHKeys()\n {}\n\n" } ]
6
Ruslan-Gabitov/Open_and_read_file
https://github.com/Ruslan-Gabitov/Open_and_read_file
476aea72323ca67b2e1464c933cba418a268594c
15994aefe4cb9aa78b0a4b60b011677894fff589
b6952ff743d25984cde3418cde5c66705628056c
refs/heads/main
2023-06-03T11:12:00.250079
2021-06-20T19:28:59
2021-06-20T19:28:59
378,724,764
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5619791746139526, "alphanum_fraction": 0.5651041865348816, "avg_line_length": 34.574073791503906, "blob_id": "f97f688bf61f105daaa00e3a5a96eb3ba5a168bc", "content_id": "c4b35828876c2ecc0e6d5e006517eb6771a1c593", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1949, "license_type": "no_license", "max_line_length": 151, "num_lines": 54, "path": "/main.py", "repo_name": "Ruslan-Gabitov/Open_and_read_file", "src_encoding": "UTF-8", "text": "def get_cook_book(file):\n cook_book = {}\n with open (file) as recipes:\n while True:\n item = recipes.readline().strip()\n if item == '':\n return cook_book\n ingridients = []\n cook_book[item] = ingridients\n number = recipes.readline().strip()\n if number.isdigit():\n for i in range(int(number)):\n ingredient_name, quantity, measure = recipes.readline().strip().split(' | ')\n ingridients.append({'ingredient_name': ingredient_name, 'quantity': quantity, 'measure': measure})\n recipes.readline().strip()\n\n\ndef get_shop_list_by_dishes(dishes, person_count):\n ingredients = {}\n cook_book = get_cook_book(\"recipes.txt\") \n for dish in cook_book:\n if dish in dishes:\n for ingredient in cook_book[dish]:\n ingredients[ingredient['ingredient_name']] = {'measure': ingredient['measure'], 'quantity': int(ingredient['quantity']) * person_count}\n return ingredients \n\n\ndef get_sorted_files(files):\n count = {}\n sorted_file = {}\n for file in files:\n with open (file) as item:\n number_of_rows = sum(1 for line in item if line.strip())\n count[number_of_rows] = file\n for lines in sorted(count):\n sorted_file[count[lines]] = lines\n return sorted_file\n\n\ndef get_merged_file(files):\n text = ''\n for file, lines in files.items():\n with open (file) as item:\n text = item.read()\n with open('merged_file.txt', 'a', encoding='utf=8') as merged_file:\n merged_file.write(f'{file}\\nСтрок: {lines}\\n{text}\\n')\n \n\nif __name__ == \"__main__\":\n get_merged_file(get_sorted_files(['1.txt', '2.txt', '3.txt']))\n print(get_cook_book('recipes.txt'))\n print()\n print()\n print(get_shop_list_by_dishes(['Запеченный картофель', 'Омлет'], 2))" } ]
1
HiredMark/OneCleverHorse
https://github.com/HiredMark/OneCleverHorse
0d56630024b3b87eea5e56f8ed732c313539453f
923ac3e27a0835ca56a3d8a0b62ed15ec4331671
2c3a83799b0febd243220f44fde4c767548d4a32
refs/heads/main
2023-04-04T17:33:22.899230
2021-04-12T14:48:36
2021-04-12T14:48:36
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6522781848907471, "alphanum_fraction": 0.6810551285743713, "avg_line_length": 23.52941131591797, "blob_id": "d358a33a5109609798a82bb811ad4382cb9006f1", "content_id": "50e75fd57a02732a7b9f9fa297e4b4b843faa882", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 417, "license_type": "permissive", "max_line_length": 69, "num_lines": 17, "path": "/app-frontend/app.py", "repo_name": "HiredMark/OneCleverHorse", "src_encoding": "UTF-8", "text": "import random\nimport json\nimport requests\nfrom flask import Flask, request, render_template\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef home():\n rcomb = request.args.get('rcomb','http://appcombiner:5003/rcomb')\n return render_template('index.html', rcomb=str(rcomb))\n\n\nif __name__=='__main__':\n app.run(host='0.0.0.0', port=5000, debug=True)\n" }, { "alpha_fraction": 0.6317241191864014, "alphanum_fraction": 0.6758620738983154, "avg_line_length": 33.52381134033203, "blob_id": "8a20a1c79864916b0f43aef0101272b4b53b0d8c", "content_id": "496f328a7045cfc43b0e7dcc8ad8313b1abf1355", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 725, "license_type": "permissive", "max_line_length": 97, "num_lines": 21, "path": "/app-combiner/app.py", "repo_name": "HiredMark/OneCleverHorse", "src_encoding": "UTF-8", "text": "import random\nimport json\nimport requests\nfrom flask import Flask, request, render_template, Response\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\n\n\[email protected]('/comb', methods=['GET', 'POST'])\ndef comb():\n adj1 = request.args.get('randj','http://appadjevtive:5001/adj')\n adj2 = request.args.get('randj','http://appadjective:5001/adj')\n noun1 = request.args.get('rnoun','http://appnoun:5002/noun')\n noun2 = request.args.get('rnoun','http://appnoun:5002/noun')\n rcomb = txt.title(str(adj1))+txt.title(str(noun1))+txt.title(str(adj2))+txt.title(str(noun2))\n return Responce( str(rcomb), mimetype='text/plain' )\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5001, debug=True)\n" }, { "alpha_fraction": 0.6599264740943909, "alphanum_fraction": 0.6746323704719543, "avg_line_length": 20.760000228881836, "blob_id": "9ff8995d205ba3c8bf0a198162a9e759f725150f", "content_id": "d57ecd541cbf8d07e323161c03115a95b2113d8d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 544, "license_type": "permissive", "max_line_length": 59, "num_lines": 25, "path": "/app-noun/app.py", "repo_name": "HiredMark/OneCleverHorse", "src_encoding": "UTF-8", "text": "import random\nimport json\nimport requests\nfrom flask import Flask, request, render_template, Response\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\n\nnounlist = []\nwith open('nouns.json') as f:\n data = json.load(f)\n\nfor noun in data['nouns']:\n nounlist.append(noun)\nrandomnoun = random.choice(nounlist)\n\n\[email protected]('/noun', methods=['GET', 'POST'])\ndef noun():\n rnoun = randomnoun\n return Responce( str(rnoun), mimetype='text/plain' )\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5002, debug=True)\n" }, { "alpha_fraction": 0.7407407164573669, "alphanum_fraction": 0.7407407164573669, "avg_line_length": 12.75, "blob_id": "304ba52b149ac9a319ff3b50bdd996eb74d56cb0", "content_id": "d2b2105be26093620d97467a7950aec83264df30", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 54, "license_type": "permissive", "max_line_length": 23, "num_lines": 4, "path": "/app-frontend/create.py", "repo_name": "HiredMark/OneCleverHorse", "src_encoding": "UTF-8", "text": "from frontend import db\n\ndb.drop_all()\ndb.create_all()" }, { "alpha_fraction": 0.6516007781028748, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 20.239999771118164, "blob_id": "d1eebdf0603af82e0a8bc3d3d3e4a3908dd338b8", "content_id": "3dde0a025af3896ad8b4e4a942d1740e906efaae", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 531, "license_type": "permissive", "max_line_length": 59, "num_lines": 25, "path": "/app-adjective/app.py", "repo_name": "HiredMark/OneCleverHorse", "src_encoding": "UTF-8", "text": "import random\nimport json\nimport requests\nfrom flask import Flask, request, render_template, Response\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\n\nadjlist = []\nwith open('adjs.json') as f:\n data = json.load(f)\n\nfor adj in data['adjs']:\n adjlist.append(adj)\nrandomadj = random.choice(adjlist)\n\n\[email protected]('/adj', methods=['GET', 'POST'])\ndef adj():\n radj = randomadj\n return Responce( str(radj), mimetype='text/plain' )\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5001, debug=True)\n" }, { "alpha_fraction": 0.505325436592102, "alphanum_fraction": 0.7065088748931885, "avg_line_length": 15.920000076293945, "blob_id": "bfb65bc3989ff225b0c4b626d8632f55a0bfe596", "content_id": "dcbf36a90b54b7965e3db5da545db0e1f15392df", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 845, "license_type": "permissive", "max_line_length": 26, "num_lines": 50, "path": "/app-combiner/requirements.txt", "repo_name": "HiredMark/OneCleverHorse", "src_encoding": "UTF-8", "text": "attrs==20.3.0\nAutomat==0.6.0\nblinker==1.4\ncertifi==2020.12.5\nchardet==4.0.0\nclick==7.1.2\ncolorama==0.3.7\nconfigobj==5.0.6\nconstantly==15.1.0\ncryptography==2.1.4\nFlask==1.1.2\nFlask-SQLAlchemy==2.5.1\ngreenlet==1.0.0\nhttplib2==0.9.2\nhyperlink==17.3.1\nidna==2.10\nimportlib-metadata==3.10.0\nincremental==16.10.1\niniconfig==1.1.1\nitsdangerous==1.1.0\nJinja2==2.11.3\njsonpatch==1.16\njsonpointer==1.10\njsonschema==2.6.0\nkeyring==10.6.0\nkeyrings.alt==3.0\nMarkupSafe==1.1.1\nnetifaces==0.10.4\noauthlib==2.0.6\npackaging==20.9\npluggy==0.13.1\npy==1.10.0\npyasn1==0.4.2\npyasn1-modules==0.2.1\npycrypto==2.6.1\nPyJWT==1.5.3\npyOpenSSL==17.5.0\npyparsing==2.4.7\npyserial==3.4\npytest==6.2.3\npytest-mock==3.5.1\npyxdg==0.25\nrequests==2.25.1\nrequests-unixsocket==0.1.5\nSecretStorage==2.3.1\nservice-identity==16.0.0\nsix==1.11.0\nSQLAlchemy==1.4.7\nWerkzeug==1.0.1\nzipp==3.4.1" }, { "alpha_fraction": 0.7918781638145447, "alphanum_fraction": 0.7918781638145447, "avg_line_length": 38.400001525878906, "blob_id": "d73686887eebeb7ca0a9b0deb80980b5ac9fbd02", "content_id": "8bfab91f36cc92f9737d84947c86b916ed4dcd48", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 197, "license_type": "permissive", "max_line_length": 92, "num_lines": 5, "path": "/nginx-depreciated/README.txt", "repo_name": "HiredMark/OneCleverHorse", "src_encoding": "UTF-8", "text": "Hi, \n\nSo this is a seperate depoloyment to the stack that creates the website. 
\nThis is to avoid routing issues with the reverse proxy, and it allows for your reverse proxy\nto scale independently. " }, { "alpha_fraction": 0.7373223304748535, "alphanum_fraction": 0.744428277015686, "avg_line_length": 45.382022857666016, "blob_id": "1a9e6778ba0815d7693340ba0da471ce07b36b0c", "content_id": "4b04f50706d7fbcdeafa6fb4489f70025c28a929", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 12384, "license_type": "permissive", "max_line_length": 362, "num_lines": 267, "path": "/README.md", "repo_name": "HiredMark/OneCleverHorse", "src_encoding": "UTF-8", "text": "# OneCleverHorse\nAuthor: Suner Syuleyman - who is currently ill\n\nA docker swarm password generator based on an XKCD cartoon\n\n![XKCD Cartoon in question](https://imgs.xkcd.com/comics/password_strength.png)\n\n## Contents\n* [Intro](#OneCleverHorse)\n* [Architecture](#architecture)\n * [Microservices Architecture](#Micro-service-architecture-example)\n * [Production Environment](#database-structure)\n* [Development Environment](#Development-Environment)\n * [GCP Server Setup](#GCP-Server-Setup)\n * [Tools](#Tools)\n * [Takeaways from previous project](#Takeaways-from-previous-project)\n * [Kanban board](#Kanban-board)\n * [Docker Compose Installation Script](#Docker-Compose-Installation-Script)\n * [Tools](#Tools)\n * [Docker Swarm - Worker Creation Startup Script](#Docker-Swarm---Worker-Creation-Startup-Script)\n* [ Continuous integration / Continuous deployment](#Continuous-integration-/-Continuous-deployment)\n * [Github - Version control system](#Github---Version-control-system)\n * [Jenkins - Pipeline and testing](#Jenkins---Pipeline-and-testing)\n * [Docker Repository - Artifact Repository](#Docker-Repository---Artifact-Repository)\n* [Risk Assesment](#Risk-Assesment)\n * [SWOT Analysis](#SWOT-Analysis)\n * [Risk assessment Matrix and Particular Issues](#Risk-assessment-Matrix-and-Particular-Issues)\n* [Testing](#Testing)\n* [Future Improvements](#future-improvements)\n* [References](#References)\n\n# Architecture\n\n## Micro-service architecture example \n\n Web Client\n { Load Balancer } \n [ Docker ] \n DB Stack -> APP Stack -> Webfront Stack \n (DB INST) (APP INST) (WEB INST)\n (DB INST) (APP INST) (WEB INST)\n (DB INST) (APP INST) (WEB INST)\n (DB INST) (APP INST) (WEB INST)\n (DB INST) (APP INST) (WEB INST)\n (DB INST) (APP INST) (WEB INST)\n (DB INST) (APP INST) (WEB INST)\n\n## Service 1\nFront end, powered by Flask, HTML, CSS and Javascript. Assembles everything and shows it all nicely using Twitter Bootstrap styling.\n## Service 2\nFetches random nouns from a list.\n## Service 3\nFetches random adjectives from a list.\n## Service 4\nIs a combiner that generates a combined string for Service 1.\n\n## Production Environment\n\nThis is the architecture design for the current deployment. What it does is it hosts most of the services inside a Docker Swarm, that then meets with NGINX to hide the back end and show the end product to the user. For a database I am using a fully managed Relational MYSQL Database by GCP.\n\n![Architecture](./ReadmeAssets/Diagram.png)\nThis is how it looks like on the GCP Cloud Console.\n![Old Layout](./ReadmeAssets/OldLayout.png)\nThe old layout includes a lot of extra services. It's biggest failure was to resolve DNS names inside the private network. 
\n![New Layout](./ReadmeAssets/NewLayout.png)\nThis resulted in this new layout that is all based in eu-west-2a and that helps Ansible and Jenkins to be able to call servers by name due to dynamic IPs\n\n# Development Environment\n## GCP Server Setup\n1. Boot up GCP Server\n2. Edit instance and place key gennereated using ssh-keygen\n3. ssh into server public IPv4 using Powershell\n4. Use VSCode Plug in to now connect into the server which should be listed as a remote connection. \n\nFor more info : <https://code.visualstudio.com/docs/remote/ssh>\n\n## Tools\n\n+ Ubuntu 18.04 - Development environement. Chosen for its stability and LTS support. \n+ MS Visual Studio Code - Requirement of the project and a nice tool to use with loads of plugins.\n+ Twitter Bootstrap 5 - Quick CSS Styling.\n+ Jenkins - Open source CI/CD pipeline builder. Very flexible in its implementation.\n+ Docker - Simple container maker.\n+ Docker Swarm - Simple deployment\n+ Trello - Kanban board of choice\n\n## Takeaways from previous project\n\n1. Avoid feature creep, really trim this project <https://github.com/78afec35/Personal-Project>\n2. Focus on success early on by reading the requirements < Requirements unclear\n3. Integrate early on. < Pipeline has been the focus of this project\n\n## Kanban board\n\nUsing Trello as a kanban board - <https://trello.com/b/Ko3b7CXN/onecleverhorse>\n\n![Trello](./ReadmeAssets/Trello.png)\n\n## Docker Compose Installation Script\n\n # make sure jq & curl is installed\n sudo apt update\n sudo apt install -y curl jq\n # set which version to download (latest)\n version=$(curl -s https://api.github.com/repos/docker/compose/releases/latest | jq -r '.tag_name')\n # download to /usr/local/bin/docker-compose\n sudo curl -L \"https://github.com/docker/compose/releases/download/${version}/docker-compose-$(uname -s)-$(uname -m)\" -o /usr/local/bin/docker-compose\n # make the file executable\n sudo chmod +x /usr/local/bin/docker-compose\n\n## Docker Swarm - Worker Creation Startup Script\n\n #!/bin/bash \n sudo apt update\n sudo apt install build-essential -y \n sudo apt install docker.io -y\n sudo groupadd docker\n sudo gpasswd -a $USER docker\n sudo su - \n sudo su jenkins\n docker swarm join --token SWMTKN-1-5qh189liw1ahad0yen6kbf6stl0qxkez1144wn7azwg4w9ide8-7jgw3pql5nlkvii8f2ejht9pi 10.154.0.9:2377\n\n# Continuous integration / Continuous deployment\nThis is the complete pipeline at the moment. \n\n![Pipeline](./ReadmeAssets/Pipeline.png)\n\n1. Webhooks fetch the github repository.\n2. The local Jenkins server is cleaned and the up to date repo is cloned down.\n3. The Jenkins server is set up to ensure it is running properly. \n4. Pytest is run against the code. \n5. Docker-Compose builds all the images according to its manifest. \n6. Images are tagged and pushed to DockerHub.\n7. Images are then pulled down again and ran through a test deployment stage where they deploy on the Jenkins server to ensure they are working. \n8. Jenkins server is wiped and all docker images removed. \n9. Ansible configures the swarm and reverse proxy. \n10. Jenkins performs and SSH connection into the manager and spins up the containers in it.\n\nThis is where I encounter a killer error that I cannot fix at the moment. I am not sure entirely where it originates. However it leads this pipeline to stall out as it cannot complete the deployment. \n\n![Killer Error](./ReadmeAssets/Killererror.png)\n\nThe most likely cause for this is that servies are span up in the workers and don't show up on the manager. 
I am not sure why that is. \n\n*EDIT* I have spent a good part of a day exploring what the error is and once I gave up and started to actually code using this pipeline, I figured out that whilst the test deployment with docker-compose passes, stack deploy is more particular about what it deploys, as well as it keeps logs. Ergo the killer error was just me using mock code that didn't work. \n\nTroubleshooting stages so far include : \n1. Destroying and recreating the server. \n2. Removing tags or any kind of capitalisation.\n3. Reinstalling docker and all prerequisites. \n4. Scaling services up and down. \n\n## Github - Version control system\n\nGit hub has been used as a version control system. It includes a main branch, a dev branch and a feature branch. \n\n![Github](./ReadmeAssets/git.png)\n\n## Jenkins - Pipeline and testing\n\nJenkins has been part of the development process since the start this time. \n\n![Jeknins](./ReadmeAssets/Jenkins.png) \n\nAs you can see a lot of builds are aborted however due to the previous error that doesn't allow them to complete.\n\n## Docker Repository - Artifact Repository\n\nDocker Hub was used as an artifact repository for this project. \n\n![Docker Hub](./ReadmeAssets/dockerhub.png)\n\nHere my renaming attempts can be seen. \n\n![Docker Tags](./ReadmeAssets/dtag.png)\n\nI initially used BUILD_NUMBER environment variable in Jenkins to automatically tag each build this was lated scrapped as I was trying to complete pipeline and it was interfering with the deployment process. \n\n\n# Risk Assesment\n\n## SWOT Analysis\n\n### <span style=\"color:green\">Strengths</span>\n+ Detailed pipeline\n+ Unique Idea\n+ Relatively easy to execute\n+ Easy to scale up, in fact the project is running on 3 replicas by default\n### <span style=\"color:purple\">Weaknesses</span>\n+ Incomplete\n+ Vulnerable to scaling issues\n+ Very poor availability from a single AZ\n### <span style=\"color:blue\">Opportunities</span>\n+ Can be integrated in an infrastructure as code solution to address some of the weaknesses\n### <span style=\"color:red\">Threats</span>\n+ No real security measures aside from using a secrets manager and a reverse proxy. \n+ Vulnerable to common attacks. \n\n## Risk assessment Matrix and Particular Issues\n\n 1 2 3 4 5 < Severity\n 1 Y O O R R\n 2 Y Y O O R\n 3 G Y O O R\n 4 G Y Y O O\n 5 G G Y Y O\n ^ \n Likelyhood\n\n G = Green\n Y = Yellow\n O = Orange\n R = Red\n example: 5-3 Very Unlikely, Medium Severity\n\n### 1-5 Category: <span style=\"color:red\">RED</span>. Almost Certain, Catastropic Severity - Developer is ill. \n\nI am rather ill. This has impacted my ability to work. As such I haven not managed to complete this project. \n\n### 1-5 Category: <span style=\"color:red\">RED</span> Almost Certain, Catastropic Severity - Docker Swarm error\n\nDocker Swarm is exhibiting buggy behaviour where it either is not correctly reporting the number of container replicas or it is failing to bring them up.\nIt is possible that is failing to mention some kind of prerequisite required for it to run. As was found yesterday when it needed docker compose to read the docker-compose.yaml file correctly. \n\n### 1-5 Category: <span style=\"color:red\">RED</span> Almost Certain, Catastropic Severity - Requirements unclear until a few days ago.\n\nProject requirements have been kept secret from this cohort untill very recently. As a timeline: \n1. This project didn't exist\n2. This project was unlikely\n3. This project was now happening\n4. 
This project isn't that important\n5. This project doesn't have that many tools you can do it an hour. \n6. Failing this project means you fail the cohort. \n7. Oh you have to use this software, don't worry I will teach you now. \n\nThe actual document we were provided in the beginning doesn't go into detail of the requirements, whilst the actual marking is very particular of what is required of us. That is not conducive of effective performance.\n\n### 5-5 Category: <span style=\"color:orange\">ORANGE</span> Rare, Catastropic Severity - Poor Availibity and Elasticity\n\nFor me it is unlikely that this is going to be a production application. However if this was a production environment the risk rating would have been upgraded. Mostly because this is with regards to the application's poor availibity and scalability. \n\nThese aspects of the application are handled manually and are prone to human error. \n\nMore so a human cannot correctly anticipate demand and so it would definately result in website downtime.\n\n### 5-5 Category: <span style=\"color:orange\">ORANGE</span> Rare, Catastropic Severity - Poor Credentials Management\n\nDocker Credentials have to be inputted manually which further adds to technical debt in this project. It requires some manual touch ups to both Ansible and Jenkins to ensure they work. Such as handling evnironment variables over SSH connections and the like.\n\n### 5-5 Category: <span style=\"color:orange\">ORANGE</span> Rare, Catastropic Severity - MYSQL is a seperate service and cannot be handled effectively using Ansible\n\nThis results in the Database not being part of the CI/CD pipeline and becoming vulnerable to configuration errors and mismanagement.\n\n# Testing\n\nAt this point in time it is incomplete. \nJenkins does do a fair amount of testings on its own. However I would have liked to involve native python and flask testing utilities as well as integration testing like Selinium to ensure the swarm cloud is up and running properly. \n\n# Future Improvements\n\n1. Obviously with more time I would have liked to complete the project. This involves intergration with a database and testing. \n2. Better layout for the front end. I would have liked it to be pretier. \n3. Increased complexity, with the adition of a database I would have liked to be able to create large lists of users and passwords and export them as JSON for the user.\n\n# References \n1. noun list - https://github.com/dariusk/corpora/blob/master/data/words/nouns.json\n2. 
adjective list - https://github.com/dariusk/corpora/blob/master/data/words/adjs.json\n" }, { "alpha_fraction": 0.7421163320541382, "alphanum_fraction": 0.7554309964179993, "avg_line_length": 40.97058868408203, "blob_id": "38bd22be3f83e2e409522ce140dddbcdbf58df90", "content_id": "7b129f12a83c20f22d653b5ebfa449a28e5818f8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1427, "license_type": "permissive", "max_line_length": 149, "num_lines": 34, "path": "/stack_run.sh", "repo_name": "HiredMark/OneCleverHorse", "src_encoding": "UTF-8", "text": "#!/bin/bash\nssh -tt -i ~/.ssh/id_rsa -o StrictHostKeyChecking=no jenkins@swarm-manager << EOF\n export DATABASE_URI=${DATABASE_URI}\n export SECRET_KEY=${SECRET_KEY}\n print SECRET_KEY\n print DATABASE_URI\n rm -rf OneCleverHorse\n git clone https://github.com/78afec35/OneCleverHorse\n cd OneCleverHorse\n pwd\n docker image prune -f -a\n docker rmi -f\n docker rm -f\n docker ps\n sudo curl -L \"https://github.com/docker/compose/releases/download/1.29.0/docker-compose-$(uname -s)-$(uname -m)\" -o /usr/local/bin/docker-compose\n sudo chmod +x /usr/local/bin/docker-compose\n docker-compose --version\n sudo apt install python3 python3-pip\n sudo pip3 install requests\n docker stack deploy --compose-file /home/jenkins/OneCleverHorse/docker-compose.yaml onecleverhorsestack\n docker stack services onecleverhorsestack\n docker service scale onecleverhorsestack_appnoun=6\n docker service scale onecleverhorsestack_appnoun=3\n docker service scale onecleverhorsestack_appcombiner=6\n docker service scale onecleverhorsestack_appcombiner=3\n docker service scale onecleverhorsestack_appfrontend=6\n docker service scale onecleverhorsestack_appfrontend=3\n docker service scale onecleverhorsestack_appadjective=6\n docker service scale onecleverhorsestack_appadjective=3\n docker stack services onecleverhorsestack\n pwd\n exit\nEOF\necho \"This pipeline has completed!\"\n" }, { "alpha_fraction": 0.7115384340286255, "alphanum_fraction": 0.7403846383094788, "avg_line_length": 28.571428298950195, "blob_id": "4a7ab4bf7d31dbdcbe5fced67fb86022d34d4357", "content_id": "8d208a254402b94d91da00d0fea3fbbc1bb3d77a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 208, "license_type": "permissive", "max_line_length": 52, "num_lines": 7, "path": "/app-frontend/Dockerfile", "repo_name": "HiredMark/OneCleverHorse", "src_encoding": "UTF-8", "text": "FROM python:latest\nRUN apt-get update && apt-get install -y python3-pip\nCOPY . .\nRUN pip install --upgrade pip\nRUN pip install -r requirements.txt\nEXPOSE 5000\nENTRYPOINT [\"/usr/local/bin/python3\", \"app.py\"]\n\n" } ]
10
rlion7/GHCNpy
https://github.com/rlion7/GHCNpy
0fcf6c163f96559af1c014c91c7453a172c495d6
034dcac9a944c880a87831598a398aaecd259940
c5828f3fa1a823721e4fd17d1100f93182e0984c
refs/heads/master
2022-12-10T19:02:25.099863
2020-09-11T23:05:35
2020-09-11T23:05:35
294,237,512
0
0
NOASSERTION
2020-09-09T21:50:52
2020-08-11T11:44:53
2017-10-03T17:31:30
null
[ { "alpha_fraction": 0.6958136558532715, "alphanum_fraction": 0.7137069702148438, "avg_line_length": 33.04597854614258, "blob_id": "f4507657a22f2b3abf290b81c5be45e76284bebe", "content_id": "b93392179757358263e6386bf052e99c10c449c6", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2962, "license_type": "permissive", "max_line_length": 217, "num_lines": 87, "path": "/Weather_Data.py", "repo_name": "rlion7/GHCNpy", "src_encoding": "UTF-8", "text": "# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.6.0\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# # Weather Data from Global Historical Climate Norms Daily\n#\n# Original code from: https://github.com/jjrennie/GHCNpy\n# Modified by: Robert Lion\n#\n# ## Modifications: \n# * Ported to Python 3\n# * Added Pandas DataFrame output\n#\n# ## Future Improvements:\n# * Add averaging / combination feature to pool data from multiple stations\n# * Lots and lots of cleanup to smooth out the original functions\n# * In particular, some calls will just re-download files that you may already have, so this could be improved to prevent downloading (with option to download new file, as they're updated daily at the source location.\n# * Update functions to allow batch outputs of multiple stations\n\nimport ghcnpy as gp\nimport pandas as pd\nimport numpy as np\nimport datetime\n\n# ## Input the latitude and longitude of your target location.\n# Google maps is a nice convenient source.\n\n# +\n# lat and lon in decimal format\nlat = -18.831751\nlon = 48.308693\n\n# distance threshold (km from target)\ndist = 200\n\n# print a list of stations within range\ngp.find_station(lat, lon, dist)\n\n# future feature, maybe, put station names into DataFrame\n# stn_cols = ['GHCN_ID','LAT','LON','ELEV','ST','STATION_NAME']\n# stn_df = pd.DataFrame(gp.find_station(lat, lon, dist), columns=stn_cols)\n# -\n\n# ## Create raw CSV file\n# Select the station ID (GHCND ID) from the list above and paste it into the output function below. \n# The output CSV file is generated from the source file (a fixed-width text file).\n# Limited cleanup is done as follows:\n# * Original data is one line per measurement with 31 value fields (i.e. one line for TMAX for January, one line for TMIN for January, etc.)\n# * The data is reshaped so each line contains one day with all possible measurements.\n# * A huge number of missing values are generated as each weather station may not report all possible measurements. These are left in this raw CSV file and removed later.\n\nstation_id = \"MA000067095\"\ngp.output_to_csv(station_id)\n\n# ## Convert the csv to a Pandas DataFrame\n# Initial data cleanup is automated as follows:\n# * Missing values (-9999.9) are replaced with NaN\n# * Columns with no real values are dropped (i.e. station doesn't track that data)\n# * Date column is created from YYYY MM DD parts, non-real dates are dropped (e.g. 
Feb 30)\n# * Dates in the future are dropped\n\ndf = gp.csv_to_dataframe(station_id)\n\n# ## Output Sample\n# Output below shows end of DataFrame for review.\n\ndf.tail(50)\n\n# ## Write Cleaned CSV\n# The clean DataFrame object is written out as a CSV.\n\nlast_date_str = str(df.iloc[-1,0]) + str(df.iloc[-1,1]) + str(df.iloc[-1,2])\nout_path = station_id + \"_cln_\" + last_date_str + \".csv\"\ndf.to_csv(out_path)\n\n# ## END\n" }, { "alpha_fraction": 0.5633661150932312, "alphanum_fraction": 0.5793473124504089, "avg_line_length": 31.897058486938477, "blob_id": "099fde7003d99adff0708ae94bb73fec70ebd28c", "content_id": "fd777dfbb7a9a0e554dd1b547bf8400d0d7e2a3a", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8948, "license_type": "permissive", "max_line_length": 134, "num_lines": 272, "path": "/ghcnpy/iotools.py", "repo_name": "rlion7/GHCNpy", "src_encoding": "UTF-8", "text": "# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.6.0\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %matplotlib inline\n\n# +\n# Import Modules\nimport re\nimport os\nimport sys\nfrom ftplib import FTP\n# from progressbar import ProgressBar, Bar, ETA, FileTransferSpeed, Percentage\nimport datetime\nfrom datetime import date\nimport time\n\nimport numpy as np\nimport pandas as pd\n# import netCDF4 as nc\n\n# -\n\n# MODULE: get_ghcnd_version\n# Get which version of GHCN-D we are using\n# ################################################\ndef get_ghcnd_version():\n\n ftp = FTP('ftp.ncdc.noaa.gov')\n ftp.login()\n ftp.cwd('pub/data/ghcn/daily')\n ftp.retrbinary('RETR ghcnd-version.txt', open('ghcnd-version.txt', 'wb').write)\n ftp.quit()\n\n ghcnd_versionfile='ghcnd-version.txt'\n try:\n with open (ghcnd_versionfile, \"r\") as myfile:\n ghcnd_version=myfile.read().replace('\\n', '')\n except:\n print((\"Version file does not exist: \",ghcnd_versionfile))\n sys.exit()\n\n return ghcnd_version\n\n\n# MODULE: get_data_station\n# Fetch Individual station (.dly ASCII format)\n# ################################################\n\n# +\n# def file_write(data):\n# file.write(data) \n# global pbar\n# pbar += len(data)\n# return None\n\n# +\ndef get_data_station(station_id):\n \"\"\"Fetch Individual station (.dly ASCII format)\"\"\"\n \n data_station_path = station_id+'.dly'\n\n if not os.path.isfile(data_station_path):\n# if not os.path.isfile(file): \n print(\"DOWNLOADING DATA FOR STATION: \" + station_id)\n ftp = FTP('ftp.ncdc.noaa.gov')\n ftp.login()\n ftp.cwd('pub/data/ghcn/daily/all')\n\n \n ftp.retrbinary('RETR ' + data_station_path, open(data_station_path, 'wb').write)\n ftp.quit()\n\n# if not os.path.isfile(data_station_path): \n# print(\"DOWNLOADING DATA FOR STATION: \" + station_id)\n# data_station_file = open(data_station_path, 'wb')\n# ftp = FTP('ftp.ncdc.noaa.gov')\n# ftp.login()\n# ftp.cwd('pub/data/ghcn/daily/all')\n \n# ftp.sendcmd(\"TYPE i\") # Switch to Binary mode\n# size = ftp.size(data_station_path) \n# print(size)\n \n# widgets = ['Downloading: ', Percentage(), ' ',\n# Bar(marker='#',left='[',right=']'),\n# ' ', ETA(), ' ', FileTransferSpeed()]\n\n# pbar = ProgressBar(widgets=widgets, maxval=size)\n# pbar.start()\n \n# ftp.retrbinary('RETR ' + data_station_path, file_write)\n# ftp.quit()\n \n\n mtime = time.strftime('%Y-%m-%D 
%H:%M:%S',time.localtime(os.path.getmtime(data_station_path)))\n print(\"Output from local file: {} - Last modified: {}\".format(data_station_path, mtime))\n \n return data_station_path\n\n\n# -\n\ndef get_data_year(year):\n \"\"\"Fetch 1 Year of Data (.csv ASCII format)\"\"\"\n print((\"\\nGETTING DATA FOR YEAR: \",year))\n\n ftp = FTP('ftp.ncdc.noaa.gov')\n ftp.login()\n ftp.cwd('pub/data/ghcn/daily/by_year')\n ftp.retrbinary('RETR '+year+'.csv.gz', open(year+'.csv.gz', 'wb').write)\n ftp.quit()\n\n outfile=year+\".csv.gz\"\n return outfile\n\ndef get_ghcnd_stations():\n \"\"\"Read or download GHCND-D Stations File\"\"\"\n\n ghcnd_stnfile='ghcnd-stations.txt' \n \n if not os.path.isfile(ghcnd_stnfile):\n print(\"DOWNLOADING LATEST STATION METADATA FILE\")\n ftp = FTP('ftp.ncdc.noaa.gov')\n ftp.login()\n ftp.cwd('pub/data/ghcn/daily')\n ftp.retrbinary('RETR ghcnd-stations.txt', open('ghcnd-stations.txt', 'wb').write)\n ftp.quit()\n \n mtime = time.strftime('%Y-%m-%D %H:%M:%S',time.localtime(os.path.getmtime(ghcnd_stnfile)))\n print(\"Output from local file: {} - Last modified: {}\".format(ghcnd_stnfile, mtime))\n \n ghcnd_stations = np.genfromtxt(ghcnd_stnfile,delimiter=(11,9,10,7,4,30),dtype=str)\n \n return ghcnd_stations\n\ndef get_ghcnd_inventory():\n print(\"\\nGRABBING LATEST STATION INVENTORY FILE\")\n\n ftp = FTP('ftp.ncdc.noaa.gov')\n ftp.login()\n ftp.cwd('pub/data/ghcn/daily')\n ftp.retrbinary('RETR ghcnd-inventory.txt', open('ghcnd-inventory.txt', 'wb').write)\n ftp.quit()\n\n # Read in GHCND-D INVENTORY File\n ghcnd_invfile='ghcnd-inventory.txt'\n ghcnd_inventory= np.genfromtxt(ghcnd_invfile,delimiter=(11,9,11,4),dtype=str)\n\n return ghcnd_inventory\n\ndef output_to_csv(station_id):\n \n # Elements of GHCN-D as CODE: [index, divisor]\n elem_dict = {'TMAX':[0,10],\n 'TMIN':[1,10],\n 'PRCP':[2,10],\n 'SNOW':[3,1],\n 'SNWD':[4,1],\n 'AWND':[5,10],\n 'EVAP':[6,10],\n 'MNPN':[7,10],\n 'MXPN':[8,10],\n 'PSUN':[9,1],\n 'TSUN':[10,1], \n }\n num_elements = len(elem_dict)\n\n # Read in GHCN-D Data\n infile = station_id+\".dly\"\n\n try:\n print(\"Reading local copy of file: {}\".format(infile))\n file_handle = open(infile, 'r')\n ghcnd_contents = file_handle.readlines()\n file_handle.close()\n except:\n print(\"File {} not found in current working directory...\".format(infile))\n get_data_station(station_id)\n file_handle = open(infile, 'r')\n ghcnd_contents = file_handle.readlines()\n file_handle.close()\n\n # Get Year Start and End of File for time dimensions\n ghcnd_begin_year = int(ghcnd_contents[0][11:15])\n ghcnd_end_year = int(ghcnd_contents[len(ghcnd_contents)-1][11:15])\n num_years = int((ghcnd_end_year - ghcnd_begin_year) + 1)\n\n # initialize array with -9999 values, as this is the format used to represent missing values\n # will convert -9999 to np.nan later\n ghcnd_data= np.zeros((num_years,12,31,num_elements),dtype='f')-(9999.0)\n \n # Go through GHCN-D Data\n for counter in range(len(ghcnd_contents)): \n # station ID is first 11 characters\n # year starts on character 12 (or 11 when you count from zero!) 
\n year = int(ghcnd_contents[counter][11:15])\n month = int(ghcnd_contents[counter][15:17])\n\n year_counter = int(year - ghcnd_begin_year)\n month_counter = int(month - 1)\n\n # element is defined in char 18-21 = 17-20 counting from 0 or 17:21 in slice\n element = ghcnd_contents[counter][17:21]\n if element in elem_dict:\n element_idx = elem_dict[element][0]\n divisor = elem_dict[element][1]\n char=21 # starting character of first VALUE daily data entries in .dly file\n\n # always use 31 days per .dly file spec\n for day_counter in range(0,31): \n ghcnd_data[year_counter,month_counter,day_counter,element_idx] = float(ghcnd_contents[counter][char:char+5]) / divisor\n char = char + 8 # each daily entry is 8 characters long, 5 for value and 3 for quality codes\n \n\n # Format header for csv file\n header_string = \"YYYY,MM,DD\"\n for key in elem_dict.keys():\n header_string = header_string + \",\" + key\n header_string = header_string + \"\\n\" \n \n # Write data to csv file\n print(\"OUTPUTTING TO CSV: \" + station_id + \".csv\")\n outfile_data = station_id+'.csv'\n out_data = open(outfile_data,'w')\n out_data.write(header_string)\n\n for year_counter in range(0,num_years):\n year_str = str(year_counter+ghcnd_begin_year)\n for month_counter in range(0,12):\n month_str = str(month_counter+1)\n for day_counter in range(0,31):\n day_str = str(day_counter+1) \n# day_string = year_str + \"-\" + month_str + \"-\" + day_str\n day_string = year_str + \",\" + month_str + \",\" + day_str\n for elem_key, elem_val in elem_dict.items():\n day_string = day_string + \",\" + str(ghcnd_data[year_counter,month_counter,day_counter,elem_val[0]])\n day_string += \"\\n\"\n out_data.write(day_string)\n\n out_data.close()\n return None\n\n\ndef csv_to_dataframe(station_id):\n \n station_id_file = station_id + \".csv\"\n try:\n df = pd.read_csv(station_id_file)\n except:\n print(\"csv file not found in local directory...\")\n output_to_csv(station_id)\n df = pd.read_csv(station_id_file)\n \n df.where(df > -998, other=np.nan, errors='ignore', inplace=True)\n df.dropna(how='all', axis=1, inplace=True)\n df['Date'] = pd.to_datetime(df['YYYY'].astype(str) + ' ' + df['MM'].astype(str) + ' ' + df['DD'].astype(str), errors='coerce')\n df['Date'].where(df['Date'] < datetime.datetime.now(), other=pd.NaT, inplace=True)\n df.dropna(how='any', axis=0, inplace=True, subset=['Date'])\n df.set_index('Date', inplace=True)\n return df\n" } ]
2
ayoungh-forked/basecms
https://github.com/ayoungh-forked/basecms
618a953862ef4f774da70f19e593df7aab313eed
90daf9432db3e80da1082f76c0c4385b1678ed69
4590c19fef638d0a90622d9e265a5534ed5c9ace
refs/heads/master
2021-05-27T21:17:22.859128
2013-01-26T14:18:04
2013-01-26T14:18:04
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6377550959587097, "alphanum_fraction": 0.6513605713844299, "avg_line_length": 35.75, "blob_id": "8158a5d16692bdfe020b52f6f9ea069df5d25874", "content_id": "d9e7ef34db2b515ae566124f436ac04c4413a05b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 588, "license_type": "no_license", "max_line_length": 78, "num_lines": 16, "path": "/scripts/pphtml.py", "repo_name": "ayoungh-forked/basecms", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\"\"\"\nRead HTML from stdin and pretty print it to stdout, stripping comments.\nIT IS SO PRETTY.\n\"\"\"\nimport re\nimport sys\nimport bs4\nif __name__ == \"__main__\":\n soup = bs4.BeautifulSoup(sys.stdin.read(), \"html5lib\")\n comments = soup.findAll(text=lambda text:isinstance(text, bs4.Comment))\n [comment.extract() for comment in comments]\n output = soup.prettify(formatter = 'html')\n # Fix link output (don't add extra spaces at the end of link tags!)\n output = re.sub(r'([a-zA-Z0-9])(\\s+)\\<\\/a\\>\\s*(\\.)?', r'\\1</a>\\3', output)\n sys.stdout.write(output)\n" } ]
1
cs-fullstack-fall-2018/django-auth1-dccodecrew18
https://github.com/cs-fullstack-fall-2018/django-auth1-dccodecrew18
4e9dd170c412b3b8938e15b19c14e1b3b5e81eb9
4dea9b1afde9548cc85b79a62792b1599e93dc45
c92165174a4a556976badf87f81d83cdc3ddd3b4
refs/heads/master
2020-04-01T18:46:22.129622
2018-10-17T21:20:32
2018-10-17T21:20:32
153,513,561
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.752655565738678, "alphanum_fraction": 0.7647951245307922, "avg_line_length": 40.25, "blob_id": "0a731550367c432cb8a0ebd6ad9468d3ca3ef208", "content_id": "59ef2621a783429d1c65e11de9b4e3aadd1daffa", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 659, "license_type": "permissive", "max_line_length": 86, "num_lines": 16, "path": "/UserAuth/userapp/models.py", "repo_name": "cs-fullstack-fall-2018/django-auth1-dccodecrew18", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.db import models\nfrom datetime import datetime\nfrom django.contrib.auth.models import User\n# Create your models here.\n\nclass userModel(models.Model):\n user = models.CharField(max_length=50)\n blogtitle = models.CharField(max_length=100)\n blogentry = models.CharField(max_length=500)\n dateCreated =models.DateTimeField(default=datetime.now)\n #attributes below will be the link necessary for user authenticty\n username = models.ForeignKey(User,on_delete=models.SET_NULL,null=True,blank=True)\n\n def __str__(self):\n return self.user, self.blogtitle,self.blogentry,self.dateCreated,self.username" }, { "alpha_fraction": 0.7685185074806213, "alphanum_fraction": 0.7685185074806213, "avg_line_length": 37.57143020629883, "blob_id": "314db469b54e8f2d866149b6e3beb07552d24ee2", "content_id": "618fbe71c82a1ff9ea54ae2173742170956732e6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 540, "license_type": "permissive", "max_line_length": 84, "num_lines": 14, "path": "/UserAuth/userapp/views.py", "repo_name": "cs-fullstack-fall-2018/django-auth1-dccodecrew18", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\n\nfrom .models import userModel\n\ndef index(request):\n user_list= userModel.objects.all()\n context = {'user_list':user_list}\n return render (request,'registration/index',context)#templatesareawhere index is\nfrom django.contrib.auth.decorators import login_required\ndef userIndex(request):\n filtereduser_list = userModel.objects.filter(username=request.user)\n context = {'filtereduser_list':filtereduser_list}\n return render (request,'userapptemp/index',context)\n# Create your views here.\n" } ]
2
yongkaka/GlobalDiagnostiX
https://github.com/yongkaka/GlobalDiagnostiX
d886abef6de1688d69bcaf9ca2a38b3029e2615e
0af48b0a1ecfd4614752b762e0cecf9045eb9a9d
7ac918fdfa562ff8d4f3afbd2d45fb678d039da2
refs/heads/master
2021-01-18T13:26:41.991851
2014-01-17T10:59:20
2014-01-17T10:59:20
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6434456706047058, "alphanum_fraction": 0.6531835198402405, "avg_line_length": 24.673076629638672, "blob_id": "1d3b6ffe121c5bf59490e4a1b48f674acbeacbfd", "content_id": "660d9daccd27f961bb1e9abec09f13e46c011d45", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1335, "license_type": "permissive", "max_line_length": 75, "num_lines": 52, "path": "/nist-attenuation-scraper.py", "repo_name": "yongkaka/GlobalDiagnostiX", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nfrom BeautifulSoup import BeautifulSoup\nimport urllib2\nfrom pylab import *\n\nURL = 'http://physics.nist.gov/PhysRefData/XrayMassCoef/ComTab/muscle.html'\n\nresponse = urllib2.urlopen(URL)\nhtml = response.read()\nsoup = BeautifulSoup(html)\n\n#~ ascii = soup.find('pre') # extract ASCII formatted table\n#~ for line in ascii:\n #~ print len(str(line).split())\n\nEnergy = []\nMu = []\nMuen = []\ntable = soup.find('table')\nfor row in table.findAll('tr'):\n col = row.findAll('td')\n if len(str(col).split()) == 3:\n Energy.append(col[0].find(text=True))\n Mu.append(col[1].find(text=True))\n Muen.append(col[2].find(text=True))\nprint col[1]\n\n#~ plt.loglog(Energy,Mu,label='Mu')\n#~ plt.loglog(Energy,Muen,label='Muen')\n#~ plt.title(soup.title(text=True))\n#~ plt.legend()\n#~ plt.show()\n#~ \n\nURL = 'http://physics.nist.gov/PhysRefData/XrayMassCoef/ComTab/bone.html'\nresponse = urllib2.urlopen(URL)\nhtml = response.read()\nsoup = BeautifulSoup(html)\n\nEnergy = []\nMu = []\nMuen = []\ntable = soup.find('table')\nfor row in table.findAll('tr'):\n col = row.findAll('td')\n if len(str(col).split()) == 3:\n Energy.append(col[0].find(text=True))\n Mu.append(col[1].find(text=True))\n Muen.append(col[2].find(text=True))\nasdf = col[1](text=True)\nprint type(unicode.join(u'\\n',map(unicode,asdf)))\n" }, { "alpha_fraction": 0.5756540298461914, "alphanum_fraction": 0.5854348540306091, "avg_line_length": 46.674156188964844, "blob_id": "4b7d24e2bdf5a7cb7bab8e10a0832a257c128cac", "content_id": "c912bf42f5d6b4ec64ea5161be230d43959705ac", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8486, "license_type": "permissive", "max_line_length": 79, "num_lines": 178, "path": "/DetectWhichImageIsRadiography.py", "repo_name": "yongkaka/GlobalDiagnostiX", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nScript to load the set of images acquired in the x-ray lab.\nSince we acquire lots of images before, during and after exposure it is\nreally annoying to manually sift through all the images in all the\ndirectories and to look for the 'best' exposure.\nThis script loads computes the mean of each image in each directory and\ngives out the maximum of the this mean.\nThis should be the 'best' exposed image of all the exposures.\n\"\"\"\n\nfrom __future__ import division\nimport glob\nimport os\nimport subprocess\nimport matplotlib.pyplot as plt\nimport shutil\n\n\n# Setup\n# Show the plot with the means. 
The plot is saved regardless of this setting\nShowPlot = False\n# Load the images as a stack in Fiji\nShowStack = False\n# Threshold X to delete folders\n# * with images with a mean smaller than X,\n# * where the darkest and brightest image differ by only X grey levels\n# * and images which are darker than 10X % of the second darkest image\nThreshold = 5\n# Delete Images or not\nDelete = True\n\nStartingFolder = ('/afs/psi.ch/project/EssentialMed/Images/' +\n '12-GOTTHARD_and_TIS/TIS/')\n\n# Get list of (only) directories in StartingFolder\n# http://stackoverflow.com/a/973488\nFolderList = [x[0] for x in os.walk(StartingFolder)]\n\n# Get list of files in each folder, these are the exposures we acquired\nExposures = [sorted(glob.glob(os.path.join(Folder, '*.jpg')))\n for Folder in FolderList]\n\n# Iterate through each folder, calculate the mean of each image in it and plot\n# this mean. 'os.walk' includes the base directory, we thus start from 1.\nfor i in range(1, len(Exposures)):\n plt.figure(figsize=[16, 9])\n print 20 * '-', i, '/', len(Exposures) - 1, 20 * '-'\n print 'Getting the mean of', len(Exposures[i]), 'Images from', \\\n os.path.basename(FolderList[i])\n MeanValue = [plt.imread(Image).mean() for Image in Exposures[i]]\n print 'The mean value of the images varies between', \\\n round(min(MeanValue), 2), 'and', round(max(MeanValue), 2)\n print 'A maximum of', round(max(MeanValue), 2), 'was found in image', \\\n MeanValue.index(max(MeanValue)), 'which corresponds to', \\\n os.path.basename(Exposures[i][MeanValue.index(max(MeanValue))])\n # We plot the mean on the left side of a figure, with some additiona\n # information on it (Maximum and deletion criterion defined by\n # 'Threshold').\n plt.subplot(1, 2, 1)\n plt.plot(MeanValue, label='Mean Value', marker='o')\n plt.axhline(y=max(MeanValue), color='g',\n label=''.join(['Max@', str(round(max(MeanValue), 2))]))\n plt.axhline(y=sorted(MeanValue)[1] * (1 + Threshold / 100), color='r',\n label=''.join(['Deletion<',\n str(round(sorted(MeanValue)[1] *\n (1 + Threshold / 100), 2))]))\n plt.legend(loc=4)\n plt.xlabel('Mean')\n plt.ylabel('Image index')\n plt.title(' '.join(['Mean of', str(len(Exposures[i])),\n 'images in\\n',\n str(os.path.basename(FolderList[i]))]))\n plt.ylim(ymin=0)\n # The right side of the plot shows the image in which we found the highest\n # mean and the two adjacent ones (if present).\n plt.subplot(3, 2, 2)\n try:\n plt.imshow(plt.imread(Exposures[i][MeanValue.index(max(MeanValue)) -\n 1]), origin='lower')\n except LookupError:\n print os.path.basename(Exposures[i][MeanValue.index(max(MeanValue))]\n ), '-1 could not be loaded'\n plt.title(' '.join(['maximal value of', str(round(max(MeanValue), 2)),\n '\\nfound in',\n str(os.path.basename(\n Exposures[i][MeanValue.index(max(MeanValue))])),\n '\\nshowing this image (middle) and the two adjacent']))\n plt.subplot(3, 2, 4)\n plt.imshow(plt.imread(Exposures[i][MeanValue.index(max(MeanValue))]),\n origin='lower')\n plt.subplot(3, 2, 6)\n try:\n plt.imshow(plt.imread(Exposures[i][MeanValue.index(max(MeanValue)) +\n 1]), origin='lower')\n except LookupError:\n print os.path.basename(Exposures[i][MeanValue.index(max(MeanValue))]\n ), '+1 could not be loaded'\n plt.savefig(os.path.join(StartingFolder,\n os.path.basename(FolderList[i]) + '.pdf'))\n if ShowPlot:\n plt.show()\n # After the plotting, we elete unnecessary files. 
But we only delete, if we\n # have more than 20 images still present in the current folder\n # \t* Delete the whole image directory if *all* images are below\n # \t'Threshold' Threshold\n # \t* Delete the whole image directory if the darkest and brightest image\n # have a difference of less than 'Threshold'\n # * Delete all images with are not 'Threshold'-% brighter than the\n # *second*-darkest image\n # See if all images are smaller than 'Threshold'. If yes, remove directory\n if max(MeanValue) < Threshold:\n print\n print 'None of the images has a mean larger than the Threshold of', \\\n str(Threshold) + '.'\n print 'I am thus deleting the whole directory...'\n shutil.rmtree(FolderList[i])\n # See if brightest and darkest image differ by more than 'Threshold'. If\n # not, delete the whole directory\n elif (max(MeanValue) - min(MeanValue)) < Threshold:\n print\n print 'The mean of the brightest (' + str(round(max(MeanValue), 2)) +\\\n ') and the darkest image (' + str(round(min(MeanValue), 2)) +\\\n ') have a difference smaller than', str(Threshold) + '.'\n print 'I am thus deleting the whole directory...'\n shutil.rmtree(FolderList[i])\n # Delete images which are darker than a bit more than the second-darkest\n # image, these are generally just noise/background.\n else:\n print 'Looking for images with a mean value between the minimum (' +\\\n str(round(min(MeanValue), 2)) + ') and', 100 + Threshold,\\\n '% of the second-brightest image (' +\\\n str(round(sorted(MeanValue)[1] * (1 + Threshold / 100), 2)) + ')'\n # Create a list of which file can be deleted\n Deletion = [Mean < sorted(MeanValue)[1] * (1 + Threshold / 100)\n for Mean in MeanValue]\n for File in range(len(Exposures[i])):\n print os.path.basename(Exposures[i][File]), \\\n 'has a mean of', round(MeanValue[File], 2), 'and',\n if Deletion[File]:\n # Only delete if we have more than 15 images in the folder\n if Delete and len(Exposures[i]) > 15:\n print 'is deleted'\n os.remove(Exposures[i][File])\n else:\n print 'could be deleted'\n else:\n print 'is kept'\n\n # Open the remaining images as a stack in Fiji, if desired\n if ShowStack:\n # First check if the folder still exists, otherwise don't do anything\n if os.path.isdir(FolderList[i]):\n # Constructing Fiji call. 
We open Fiji in the current directory (so\n # saving is in that one), open all the images in that directory\n # as stack and save it out as _average.tif and _sum.tif.\n viewcommand = '/scratch/Fiji.app/ImageJ-linux32 -eval' +\\\n ' \"run(\\\\\"Image Sequence...\\\\\", \\\\\"open=' +\\\n os.path.abspath(FolderList[i]) + ' file=snapshot' +\\\n ' convert\\\\\"); run(\\\\\"Z Project...\\\\\",' +\\\n ' \\\\\"projection=[Average Intensity]\\\\\"); run(\\\\\"Save\\\\\",' +\\\n ' \\\\\"save=' + os.path.join(os.path.abspath(FolderList[i]),\n '_average.tif') +\\\n '\\\\\"); run(\\\\\"Close\\\\\"); run(\\\\\"Z Project...\\\\\",' +\\\n ' \\\\\"projection=[Sum Slices]\\\\\"); run(\\\\\"Save\\\\\", \\\\\"save=' +\\\n os.path.join(os.path.abspath(FolderList[i]), '_sum.tif') +\\\n '\\\\\"); run(\\\\\"Close\\\\\");\"'\n print 'Starting Fiji with the command'\n print '---'\n print viewcommand\n print '---'\n print 'Quit Fiji to proceed...'\n\n with open(os.devnull, 'wb') as devnull:\n subprocess.call(viewcommand, stdout=devnull,\n stderr=subprocess.STDOUT, shell=True)\n" }, { "alpha_fraction": 0.765625, "alphanum_fraction": 0.7734375, "avg_line_length": 37.956520080566406, "blob_id": "2039dcc9d29771a1967924de7121e23c9f054d77", "content_id": "bd1d78d62f099e4ac02bb5b039ffb8623ba57c4e", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 896, "license_type": "permissive", "max_line_length": 80, "num_lines": 23, "path": "/README.md", "repo_name": "yongkaka/GlobalDiagnostiX", "src_encoding": "UTF-8", "text": "# Develpopment Repository for OmmatiDiag\n\nThis repo tracks code (mostly Python) written while working on OmmatiDiag, the\naffordable, reliable and standard-compliant detector for the\n[GlobalDiagnostiX][GDX]-system.\n\n[GDX]: http://globaldiagnostix.org\n\nThe files follow the [PEP 8 -- Style Guide for Python Code][pep8] and the\n[Git branching model described by Vincent Driessen][branching] as closely as\npossible.\n\n[pep8]: http://www.python.org/dev/peps/pep-0008/ \n[branching]: http://nvie.com/posts/a-successful-git-branching-model/\n\nSince this repository is part of a scientific project and is depending on\na particular set of hardware, it seems silly to make it closed source, the whole\nthing is covered by [an (un)license](LICENSE).\n\n[I, the author] of those files am very grateful for pull requests, (better)\nideas on how to implement certain things and tips!\n\n[I]: http://davidhaberthuer.ch/\n" }, { "alpha_fraction": 0.5315402746200562, "alphanum_fraction": 0.6015866994857788, "avg_line_length": 38.601531982421875, "blob_id": "80fed0b0fddc79a44a617d73d130e1b3f7655e81", "content_id": "0bb2969eaab180ab02bfdf8f894703e49619fac5", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10349, "license_type": "permissive", "max_line_length": 79, "num_lines": 261, "path": "/DoseCalculation.py", "repo_name": "yongkaka/GlobalDiagnostiX", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nWe'd like to know a bit more about the dose we inflict on the patient.\nThis script is used to calculate said dose based on the x-ray spectra that we\nwill be able to set (see Source-Specifications).\n\"\"\"\nfrom __future__ import division # fix integer division\nfrom optparse import OptionParser\nimport sys\nimport os\nimport numpy as np\nfrom scipy import constants\n\n# Use Pythons Optionparser to define and read the options, and also\n# give some 
help to the user\nparser = OptionParser()\nusage = \"usage: %prog [options] arg\"\nparser.add_option('-v', '--kv', dest='kV',\n type='float',\n metavar='53',\n default=90,\n help='Tube peak voltage [kV] you would like to calcuate the '\n 'dose for. The script only accepts voltages that are '\n 'in the specs (and tells you if you set others). '\n 'Defaults to %default kV, which is the WHO setting for '\n 'lumbar spine.')\nparser.add_option('-m', '--mas', dest='mAs',\n type='float',\n metavar='1.6',\n default=125,\n help='mAs settings. Defaults to %default mAs, which is the '\n 'WHO setting for lumbar spine.')\nparser.add_option('-e', '--exposuretime', dest='Exposuretime',\n type='float',\n metavar='100',\n default=1000,\n help='Exposure time [ms]. Defaults to 1 second, because we '\n 'assume that \"-m\" (mAs) is used as input. If the user '\n 'insists, an exposure time can be set.')\nparser.add_option('-d', '--distance', dest='Distance',\n type='float',\n metavar='100',\n default=140,\n help='Source-Detector distance [cm]. Defaults to %default'\n 'cm')\nparser.add_option('-l', '--length', dest='Length',\n type='float',\n metavar='15',\n default=43.,\n help='Length of the (square) FOV [cm]. Defaults to %default '\n 'cm.')\nparser.add_option('-t', '--thickness', dest='Thickness',\n type='float',\n metavar='13',\n default=15.,\n help='Patient or sample thickness [cm]. Used to calculate '\n 'attenuation. Defaults to %default cm.')\nparser.add_option('-c', '--chatty', dest='chatty',\n default=False, action='store_true',\n help='Be chatty. Default: Tell us only the relevant stuff.')\n(options, args) = parser.parse_args()\n\n# show the help if no parameters are given\nif options.kV is None:\n parser.print_help()\n print 'Example:'\n print 'The command below calculates the dose for a peak tube voltage of',\\\n '60 kV.'\n print\n print sys.argv[0], '-v 60'\n exit(1)\n\n# Inform the user that we only have certain values to work with\nVoltage = [46, 53, 60, 70, 80, 90, 100, 120]\nif not options.kV in Voltage:\n print 'You can only enter one of these voltages:',\\\n str(Voltage).strip('[]'), 'kV'\n print\n print 'Try again with the nearest allowed value:'\n # http://stackoverflow.com/a/9706105/323100\n print sys.argv[0], '-v', Voltage[min(range(len(Voltage)),\n key=lambda i:abs(Voltage[i] -\n options.kV))]\n exit(1)\n\nChosenVoltage = Voltage.index(options.kV)\n# Load spectra\nSpectraPath = os.path.join(os.getcwd(), 'Spectra')\n# Construct file names, then load the data with the filenames (we could do this\n# in one step, but like this it's easier to debug. 
'SpectrumData' is the data\n# without comments, thus we read the mean energy on line 7 in a second step\nSpectrumLocation = [os.path.join(SpectraPath, 'Xray-Spectrum_' +\n str(\"%03d\" % kV) + 'kV.txt')\n for kV in Voltage]\nSpectrumData = [(np.loadtxt(FileName)) for FileName in SpectrumLocation]\nMeanEnergy = [float(open(FileName).readlines()[5].split()[3]) for FileName in\n [os.path.join(SpectraPath, 'Xray-Spectrum_' + str(\"%03d\" % kV) +\n 'kV.txt') for kV in Voltage]]\nif options.chatty:\n for v, e in zip(Voltage, MeanEnergy):\n print 'Peak tube voltage', v, 'kV = mean energy', int(round(e)), 'keV'\n\nprint 'For a peak tube voltage of', options.kV, 'kV and a current of',\\\n int(round(options.mAs / (options.Exposuretime / 1000.))), 'mAs (exp.',\\\n 'time', options.Exposuretime, 'ms) we get a mean energy of',\\\n round(MeanEnergy[ChosenVoltage], 3), 'keV.'\nprint\n\n# Calculate the numbers of photons emitted from the tube.\nPhotonEnergy = (MeanEnergy[ChosenVoltage] / 1000) * constants.e # Joules\nprint 'At this mean energy, a single photon has an energy of',\\\n '%.3e' % PhotonEnergy, 'J.'\nprint\n\n# Surface entrance dose\n# The K-value is based on the machine. The BAG-calculator (see below) list 0.1\nK = 0.1 # mGy m^2 mAs^-1\n# BSF found by Arouna2000, cited by BAG2012. Gives the same SED as the\n# XLS-calculator from BAG (http://is.gd/oTpniQ)\nBSF = 1.35\n\n# calculating while converting Focusdistance from m to cm\nSED = K * (options.kV / 100) ** 2 * options.mAs *\\\n (100 / options.Distance) ** 2 * BSF\nprint 'The surface entrance dose for an x-ray pulse with'\nprint ' * U =', options.kV, 'kV'\nprint ' * Q =', options.mAs, 'mAs'\nprint ' * FOD =', options.Distance / 100, 'm'\nprint ' * K =', K, 'mGy*m²/mAs'\nprint ' * BSF =', BSF\nprint 'is SED = K*(U/100)^2*Q*(1/FOD)^2*BSF =', round(SED, 3), 'mGy (mJ/kg).'\nprint\n\n# Correspond SED to photon count\nN0 = SED / PhotonEnergy\nprint 'A SED of', '%.3e' % (SED / 1000), 'Gy (mJ/kg) corresponds to',\\\n '%.3e' % N0, 'absorbed photons per kg (with a photon',\\\n 'energy of', '%.3e' % PhotonEnergy, 'J per photon).'\nprint 'This SED can be calculated back to a number of photons with',\\\n 'N=(UI/E)*eta*(Area/4πr²) and corresponds to',\n\neta = 1e-9 # *ZV\n# Calculate the number of photons from the tube to the sample\n#~ N0 = (VI/E)*eta*(A/4Pir²)\nN0 = (options.kV * ((options.mAs * 1000) / (options.Exposuretime/1000))) /\\\n PhotonEnergy * eta *\\\n ((options.Length ** 2) / (4 * np.pi * options.Distance ** 2))\nprint '%.4e' % N0, 'photons with a mean energy of,', PhotonEnergy\n\nprint 'We assume these photons are all the photons that reached the patient,',\\\n 'and thus can calculate the photon flux from this.'\n\nFlux = N0 / (options.Exposuretime / 1000)\nprint 'With an exposure time of', options.Exposuretime, 'ms the',\\\n 'aforementioned number of photons corresponds to a photon flux of',\\\n '%.3e' % Flux, 'photons per second (from the source to the patient',\\\n 'surface.'\n\n# Attenuation in Patient\nAttenuationCoefficient = 0.5 # For calculation we just simply assume 50%.\n# We NEED to read the data from the NIST tables, but they're in shutdown now...\nprint 'Attenuation coefficient set to', AttenuationCoefficient, 'cm^-1 (@' +\\\n str(Voltage[ChosenVoltage]), 'kV)'\n# Number of absorbed photons\n# N = N0(e^-uT)\nN = N0 * (np.exp((-AttenuationCoefficient * (options.Thickness/100))))\nprint 'Assuming an attenuation coefficient of', AttenuationCoefficient, 'and',\\\n 'a penetration depth of', options.Thickness, 'cm we have (according 
to',\\\n 'the Beer-Lambert law (N = N0 * e^-uT)'\nprint ' *', '%.3e' % N, 'photons after the xrays have passed the patient'\nprint ' * thus', '%.3e' % (N0 - N), 'photons were absorbed'\nprint ' * the intensity dropped to', round((N/N0)*100, 2), '%'\n\nprint\nprint\nprint 'Use nist-attenuation-scraper.py to get the correct attenuation!'\n\nexit()\n\n\n\n\n\n\n\n\n\n# Attenuation Coefficients\n# @40kV, half bone, half muscle\nAttenuationCoefficient = []\nAttenuationCoefficient.append(np.mean((2.685e-1, 6.655-1)))\n# @70kV (0.5*60+0.5*80), both half bone, half muscle\nAttenuationCoefficient.append(np.mean((np.mean((2.048e-01, 3.148e-01)),\n np.mean((1.823e-01, 2.229e-01)))))\n\n'''\nSkeletal muscle (http://is.gd/D88OFv)\n Energy μ/ρ μen/ρ\n (MeV) (cm2/g) (cm2/g)\n 1.00000E-02 5.356E+00 4.964E+00\n 1.50000E-02 1.693E+00 1.396E+00\n 2.00000E-02 8.205E-01 5.638E-01\n 3.00000E-02 3.783E-01 1.610E-01\n 4.00000E-02 *2.685E-01* 7.192E-02\n 5.00000E-02 2.262E-01 4.349E-02\n 6.00000E-02 *2.048E-01* 3.258E-02\n 8.00000E-02 *1.823E-01* 2.615E-02\n 1.00000E-01 1.693E-01 2.544E-02\n 1.50000E-01 1.492E-01 2.745E-02\n 2.00000E-01 1.358E-01 2.942E-02\nCortical bone (http://is.gd/2176eQ)\n Energy μ/ρ μen/ρ\n (MeV) (cm2/g) (cm2/g)\n 1.00000E-02 2.851E+01 2.680E+01\n 1.50000E-02 9.032E+00 8.388E+00\n 2.00000E-02 4.001E+00 3.601E+00\n 3.00000E-02 1.331E+00 1.070E+00\n 4.00000E-02 *6.655E-01* 4.507E-01\n 5.00000E-02 4.242E-01 2.336E-01\n 6.00000E-02 *3.148E-01* 1.400E-01\n 8.00000E-02 *2.229E-01* 6.896E-02\n 1.00000E-01 1.855E-01 4.585E-02\n 1.50000E-01 1.480E-01 3.183E-02\n 2.00000E-01 1.309E-01 3.003E-02\n'''\n\nr = 140 # cm, Distance from source to sample\neta = 1e-9 # *ZV\nZ = 74 # Tungsten\neV = 1.602e-19 # J\nQFactor = 1 # http://en.wikipedia.org/wiki/Dosimetry#Equivalent_Dose\nWeightingFactor = 0.12 # http://en.wikipedia.org/wiki/Dosimetry#Effective_dose\nExposureTime = 1000e-3 # s\n\n# Calculate the number of photons from the tube to the sample\n#~ N0 = (VI/E)*eta*(A/4Pir²)\nN0 = (Voltage * Current) / (Voltage * eV) * \\\n eta * Z * Voltage * \\\n Area / (4 * np.pi * r ** 2)\nprint ' - the tube emitts %.4e' % N0, 'photons per second'\n\n# Absorbed radiation dose per second\n#~ Da = Eneregy / Weight # J/kg per second\nDa = N * AverageEnergy[case] * 1000 * eV / Weight\n\nprint ' -', round(Da * 1000, 4), 'mGy/s are absorbed by the sample,',\\\n ' if we assume it is', Weight, 'kg'\n\n# Effective dose per second\n#~ De = Da * Wr, WR = Q * N\nDe = Da * QFactor * WeightingFactor\n\nprint ' -', round(De*1000, 4), 'mSv/s is the effective dose'\n\n# Total effective dose on the sample\nD = De * ExposureTime\n\nprint ' -', round(D*1000, 4), 'mSv is the effective dose on the',\\\n 'sample for an exposure time of =', ExposureTime, 's)'\n" }, { "alpha_fraction": 0.659853458404541, "alphanum_fraction": 0.6927745342254639, "avg_line_length": 44.8425178527832, "blob_id": "fef33909329b55802a2938b18be22c75be5fc896", "content_id": "d0cb6ef7eb6200d9899a7b4f0a830685a2bd6de6", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17466, "license_type": "permissive", "max_line_length": 397, "num_lines": 381, "path": "/CalculateDetector.py", "repo_name": "yongkaka/GlobalDiagnostiX", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\n# You can run this script to produce several frames of output as so:\n# (or use the command at the end of the script to also start Fiji and do some more stuff)\n\"\"\"\nfor f in {10..15..1};do for o in {45..50..1};do for s in 
{5..10..1};do ./CalculateDetector.py -f $f -o $o -s $s -p;done;done;done\n\"\"\"\n\nfrom pylab import *\nimport numpy\nfrom scipy import constants\nfrom scipy import integrate\nfrom matplotlib.patches import Wedge\nfrom matplotlib2tikz import matplotlib2tikz\nfrom optparse import OptionParser\nimport sys\nimport os\n\n###################### SETUP ######################\n\n# Use Pythons Optionparser to define and read the options, and also\n# give some help to the user\nparser = OptionParser()\nusage = \"usage: %prog [options] arg\"\nparser.add_option('-s', '--ScreenSize', dest='FOV', type='float', default=4.5, \n\thelp='Field of view in centimeters, i.e. desired screen size (default=43 cm)',\n\tmetavar='43')\nparser.add_option('-o', '--OpeningAngle', dest='OpeningAngle', default=90.0, type='float',\n\thelp='Opening angle of the lens in degrees (default=90)',\n\tmetavar='45')\nparser.add_option('-n', '--NumericalAperture', dest='NA', default=0.4, type='float',\n\thelp='Numerical Aperture of the lens',\n\tmetavar='0.6')\nparser.add_option('-f', '--FStop', dest='FStop', default=1.2, type='float',\n\thelp='F-Stop of the lens',\n\tmetavar='0.8')\t\nparser.add_option('-c', '--CCDSize', dest='SensorSize', default=3.0, type='float',\n\thelp='Size of the CCD/CMOS sensor (in millimeters!), Default=7 mm/0.7 cm',\n\tmetavar='7')\nparser.add_option('-e', '--Energy', dest='InputEnergy', default=50.4, type='float',\n\thelp='Energy of the x-ray photons in kV (default=50 kV)',\n\tmetavar='120')\nparser.add_option('-l', '--LinePairs', dest='LinePairs', default=5.0, type='float',\n\thelp='Desired resolution in lp/mm (default=2.5 lp/mm)',\n\tmetavar='4')\nparser.add_option('-p', '--print', dest='Output', default=False, action='store_true',\n\thelp='Save/Print the files to disk as \"DetectorConfiguration_wd_XX_fov_XX_angle_XX_sensor_XX_energy_XX.png and .txt',\n\tmetavar = 1)\n(options, args) = parser.parse_args()\noptions.SensorSize = options.SensorSize / 10\noptions.InputEnergy = options.InputEnergy * 1000\n\n# show the help if some important parameters are not given\nif options.FOV==None \\\n\tor options.OpeningAngle==None \\\n\tor options.SensorSize==None \\\n\tor options.InputEnergy==None \\\n\tor options.LinePairs==None:\n\tparser.print_help()\n\tprint 'Example:'\n\tprint 'The command below shows you the configuration for a setup with a screen size of 20.5 cm (half the required size), a lens with an opening angle of 45 deg, a small sensor of 7 mm and an x-ray energy of 50 kV:'\n\tprint ''\n\tprint sys.argv[0], '-s 20.5 -o 45 -c 7 -e 50'\n\tprint ''\n\tsys.exit(1)\n\nprint 80 * '_'\n\n###################### CALCULATE ######################\n# Intensifying screen\n# http://www.sprawls.org/ppmi2/FILMSCR/:\n# > Although the total energy of the light emitted by a screen is much less than the total x-ray energy the screen receives, the light energy is much more efficient in exposing film because it is \"repackaged\" into a much larger number of photons. 
If we assume a 5% energy conversion efficiency, then one 50-keV x-ray photon can produce 1,000 blue-green light photons with an energy of 2.5 eV each.\nScreenAbsorption = 0.1\nScreenConversion = 0.5\nScreenEmission = 1\n\nScreenOutput = ScreenAbsorption * ScreenConversion * ScreenEmission\n\nWavelength = 500e-9 # nm (green according to http://is.gd/AWmNpp)\n#~ E = h * nu, nu = c / lambda\nPhotonEnergyJ = constants.h * constants.c / Wavelength\nPhotonEnergyeV = PhotonEnergyJ/constants.eV\n#~ print 'Visible light photons with a wavelength of',int(Wavelength*1e9),\\\n\t#~ 'nm have an energy of',round(PhotonEnergyJ,22),'J or',\\\n\t#~ round(PhotonEnergyeV,3),'eV.'\n\nPhotonsAfterScintillator = options.InputEnergy/PhotonEnergyeV * ScreenOutput\nprint 'For each', options.InputEnergy/1000, 'kV x-ray photon'\nprint ' * we have', int(round(PhotonsAfterScintillator)), 'visible light',\\\n 'photons after the scintillator (with a'\nprint ' conversion efficiency of', ScreenOutput * 100, '%).'\n\n# Lens\nLensReflectance = 0.02\nLensAbsorption = 0.02\n# Assume a set of double plano-convex lenses, with 4% loss per lens\nLensTransmission = 1 - (2 * LensReflectance) - (2 * LensAbsorption)\nPhotonsAfterLens = PhotonsAfterScintillator*LensTransmission\n#~ tan(\\alpha/2) = (FOV/2) / Distance\n#~ Distance = (FOV/2)/tan(\\alpha/2)\nWorkingDistance = (options.FOV/2)/numpy.tan(numpy.deg2rad(options.OpeningAngle)/2)\n\nprint ' * we have', int(round(PhotonsAfterLens)), 'visible light photons',\\\n 'after the lens couple (with a'\nprint ' transmission of', LensTransmission * 100, '%).'\n\n# Sensor\nQESensor = 0.4\nProducedElectrons = PhotonsAfterLens * QESensor\nDemagnification = options.FOV / options.SensorSize\nSensorPosition = WorkingDistance / Demagnification\n\nprint ' * we get', int(round(ProducedElectrons)), 'electrons on the',\\\n 'detector (with a QE of', str(QESensor) + ').'\n\nexit()\n\n# LinePairs\nLinePairsScintillator = options.FOV*10 * options.LinePairs\nPixelsNeeded = LinePairsScintillator * 2\nSensorPixelSize = options.SensorSize / PixelsNeeded\n\n# Comparison with Flatpanel detectors\nFlatPanelPixelSize = 0.194 # mm\nScintillatorThickness = 1.0 # mm\nConversionEfficiency = 1.0\nNumericalApertureCalculated = FlatPanelPixelSize / (ScintillatorThickness / 2)\nNumericalApertureAverage = integrate.quad(lambda x: np.arctan(FlatPanelPixelSize / ( 2 * x )), 0.01, 1)[0]\nNumericalApertureDetermined = (SensorPosition * 10) / ( options.FStop * 2 * SensorPosition * 10 / ( 1 / Demagnification))\nFStopJBAG = 0.8\nNumericalApertureJBAG = 1 / ( 2 * FStopJBAG )\n\n###################### PLOT ######################\n# Plot optical configuration\n# Draw the stuff we calculated above\nfig = plt.figure(1,figsize=(32,18))\nThickness = 1.0\nSupportThickness = 0.5\nXRaySourcePosition = 25\n\n## Optical Configuration\nplt.subplot(211)\nplt.axis('equal')\n#~ axes = plt.gca()\n#~ axes.axes.get_yaxis().set_ticks([])\nplt.title('Angular opening: ' + str('%.2f' % options.OpeningAngle) + ', Screen size: ' +\\\n\tstr('%.2f' % options.FOV) + 'cm, Working Distance: ' +\\\n\tstr('%.2f' % round(WorkingDistance,2)) + 'cm\\n' +\\\n\t'Scintillator Efficiency: ' + str(round(ScreenOutput,2)*100) +'%, ' +\\\n\t'Lens transmission: ' + str(round(LensTransmission,2)*100) +'%, ' +\\\n\t'QE sensor: ' + str(QESensor))\nplt.xlabel('Distance [cm]')\nplt.ylabel('Distance [cm]')\n\n### Optical Axis\nplt.axhline(color='k',linestyle='--')\n\n### X-rays\nx=np.arange(0,XRaySourcePosition-Thickness-SupportThickness,0.1)\nfor yshift in 
np.arange(-options.FOV/2,options.FOV/2,options.FOV/10.0):\n\tplt.plot(-x-Thickness-SupportThickness,sin(x)+yshift,'k')\n\n### Scintillator\nScintillatorSupport = Rectangle((-Thickness-SupportThickness,(options.FOV/2)+SupportThickness),Thickness+SupportThickness,-options.FOV-SupportThickness*2,facecolor=\"black\")\ngca().add_patch(ScintillatorSupport)\t\nScintillator = Rectangle((-Thickness,options.FOV/2),Thickness,-options.FOV,facecolor=\"lightgreen\")\ngca().add_patch(Scintillator)\n\n### Light-Cone\n#### Opening angle\nwedgecolor = 'r'\nWedge = Wedge((WorkingDistance,0),-WorkingDistance*.25,-(options.OpeningAngle/2), (options.OpeningAngle/2),fill=False, color=wedgecolor)\nplt.gca().add_patch(Wedge)\n\n#### Light Beams\nbeamcolor = wedgecolor\n##### Scintillator - Lens\nplt.plot([0,WorkingDistance],[options.FOV/2,0],beamcolor)\nplt.plot([0,WorkingDistance],[-options.FOV/2,0],beamcolor)\n##### Lens - Sensor\nplt.plot([WorkingDistance,WorkingDistance+SensorPosition],[0,options.FOV/2/Demagnification],beamcolor)\nplt.plot([WorkingDistance,WorkingDistance+SensorPosition],[0,-options.FOV/2/Demagnification],beamcolor)\n\n### Camera\nSensor = Rectangle((WorkingDistance+SensorPosition,options.SensorSize/2),Thickness/4,-options.SensorSize,facecolor=\"black\")\ngca().add_patch(Sensor)\nHousing = Rectangle((WorkingDistance+SensorPosition+Thickness/4,options.SensorSize/2/.618),Thickness/4/.618,-options.SensorSize/.618,facecolor=\"black\")\ngca().add_patch(Housing)\n\n## Text\nstep = options.FOV / 8.0\nplt.text(1.618*WorkingDistance,options.FOV/2,'- 1 ' + str(options.InputEnergy/1000) + ' kV x-ray photon')\nplt.text(1.618*WorkingDistance,options.FOV/2-step,'- ' + str(int(PhotonsAfterScintillator)) + ' ' + str(Wavelength*1e9) + ' nm photons after scintillator')\nplt.text(1.618*WorkingDistance,options.FOV/2-2*step,'- ' + str(int(PhotonsAfterLens)) + ' ' + str(Wavelength*1e9) + ' nm photons after lens')\nplt.text(1.618*WorkingDistance,options.FOV/2-3*step,'- ' + str(int(ProducedElectrons)) + ' electrons on sensor')\nplt.text(1.618*WorkingDistance,options.FOV/2-4*step,'- Opening Angle: ' + str(options.OpeningAngle) + ' deg') # http://is.gd/pxodor\nplt.text(1.618*WorkingDistance,options.FOV/2-5*step,'- Sensorsize: '+ str(options.SensorSize)+' cm')\nplt.text(1.618*WorkingDistance,options.FOV/2-6*step,'- Demagnification: ' + str('%.2f' % Demagnification) + 'x')\nplt.text(1.618*WorkingDistance,options.FOV/2-7*step,'- To achieve ' + str('%.2f' % options.LinePairs) + ' lp/mm, we need')\nplt.text(1.618*WorkingDistance,options.FOV/2-8*step,' a sensor with ' + str(round(PixelsNeeded**2/1e6,2)) + ' Mpx (' + str(int(PixelsNeeded)) + 'x' + str(int(PixelsNeeded)) + ' px)')\nplt.text(1.618*WorkingDistance,options.FOV/2-9*step,' resulting in a pixelsize of ' + str('%.2f' % (SensorPixelSize*1000)) + ' um.')\n\n# Plot NA\nplt.subplot(234)\nplt.axis('equal')\nMagnification = np.arange(0,1.01,0.01)\nfor FStop in [0.5,0.8,1,1.2,1.4,2]:\n\tplt.plot(Magnification,Magnification / ( 2 * FStop * ( 1 + Magnification )),label='f/'+str('%0.2f' % FStop))\nplt.plot(Magnification,Magnification / ( 2 * options.FStop * ( 1 + Magnification )),'g--',linewidth=5,label='f/'+str('%0.2f' % options.FStop))\nplt.legend(loc='upper left')\nplt.hlines(NumericalApertureAverage,0,1)\nplt.text(0.618,NumericalApertureAverage,'NA flat panel')\nplt.hlines(NumericalApertureDetermined,0,1)#,'g','--')\nplt.text(0.618,NumericalApertureDetermined,'simulated NA of our 
lens')\nplt.hlines(NumericalApertureJBAG,0,1)#,'g','--')\nplt.text(0.618,NumericalApertureJBAG,'NA JBAG (?)')\nplt.vlines(1/Demagnification,0,1,'g','--')\nplt.text(1/Demagnification+0.25,0.8,'Our calculated\\nDemagnification: ' + str(Demagnification) + 'x=' + str(round(1/Demagnification,3)))\n\nplt.title('NA')\nplt.xlabel('Magnification')\nplt.ylabel('NA')\nplt.xlim([0,1])\n\n# Plot X-ray spectra\nplt.subplot(235)\n# http://stackoverflow.com/a/11249430/323100\nSpectra = [\n\t(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_046kV.txt')),\n\t(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_053kV.txt')),\n\t(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_060kV.txt')),\n\t(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_070kV.txt')),\n\t(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_080kV.txt')),\n\t(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_090kV.txt')),\n\t(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_100kV.txt')),\n\t(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_100kV.txt')),\n\t(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_120kV.txt'))\n\t]\n\nAnodeMaterial = [ str(open(FileName).readlines()[1].split()[3]) for FileName in Spectra ]\nEnergy = [ int(open(FileName).readlines()[3].split()[7]) for FileName in Spectra ]\nRipple = [ double(open(FileName).readlines()[4].split()[4]) for FileName in Spectra ]\nAirKerma = [ double(open(FileName).readlines()[5].split()[3]) for FileName in Spectra ]\nMeanEnergy = [ np.round(double(open(FileName).readlines()[6].split()[3]),decimals=2) for FileName in Spectra ]\nFilterMaterial = [ str(open(FileName).readlines()[9].split()[1]) for FileName in Spectra ]\nFilterThickness = [ int(open(FileName).readlines()[9].split()[2]) for FileName in Spectra ]\nData = [ ( np.loadtxt(FileName) ) for FileName in Spectra ]\n\nfor i in range(len(Spectra)):\n\t#~ plt.plot( Data[i][:,0], Data[i][:,1], label= str(Energy[i]) + 'kV, Mean=' + str(MeanEnergy[i]) +'keV' )\n\tplt.plot( Data[i][:,0], Data[i][:,1], label=str(Energy[i]) +'kV')\n\nplt.legend(loc='best')\nplt.title('X-ray spectra for ' + AnodeMaterial[0] + ' Anode,\\n' + FilterMaterial[0] + ' Filter with ' + str(FilterThickness[0]) + ' mm Thickness' )\nplt.xlabel('Energy [kV]')\nplt.ylabel('Photons')\n\n# Plot of Ball Lenses\nplt.subplot(236)\nDia = np.arange(0,15,0.2)\nNA = ( 0.918919 * ( -1.0 + Dia ) ) / Dia\nFNo = ( 0.544118 * Dia ) / ( -1.0 + Dia )\n\nplt.plot(Dia,NA,'r',label='NA')\nplt.plot(Dia,FNo,'g',label='FNo')\nplt.legend(loc='best')\nplt.xlim([1.5,10])\nplt.ylim([0.3,1.2])\n\nfor i in (2,8):\n\tplt.axvline(i,color='k')\n\tif i > 3:\n\t\tplt.axhline(NA[np.where(Dia == i)],color='k')\n\t\tplt.axhline(FNo[np.where(Dia == i)],color='k')\n\nplt.savefig('CalculateDetector.png')\n\n###################### OUTPUT ######################\nif options.Output:\n\tPrefix = 'Config'\n\ttry:\n\t\tos.mkdir(os.path.join(os.getcwd(),Prefix))\n\texcept:\n\t\tprint 'Directory',os.path.join(os.getcwd(),Prefix),'already exists, did not create it...'\n\tprint\n\t\n # We should probably do something more clever with \"print \"%10.4f\" % options\" than the stuff below\n\tSaveName = Prefix + str(options).replace('{','_').replace('}','').replace(\"'\",'').replace(': ','_').replace(', ','-').replace('-Output_True','').replace('9999999999999','') # getting the output of 'options' and doing some string-replacement to get a nice filename for the output.\n\t\t\n\t#### FIGURE #### \n\tsavefig(os.path.join(Prefix,''.join([SaveName,'.png'])),dpi=fig.dpi)\n\tprint 'Figure saved to ' + 
os.path.join(Prefix,''.join([SaveName,'.png']))\n\tprint \n\t#### LOGFILE ####\n\t# Redirect console-output to a file according to http://stackoverflow.com/a/4829801/323100\n\tlogfile = open(os.path.join(Prefix,''.join([SaveName,'.txt'])),'w') # open the result file in write mode\n\told_stdout = sys.stdout # store the default system handler to be able to restore it\n\tsys.stdout = logfile # Now your file is used by print as destination \n\t\n\tprint 'Call the script with the commandline below to get the same result...'\n\tprint ' '.join(sys.argv)\n\tprint '________________________________________________________________________________'\n\tprint 'If we define the intensifying screen:'\n\tprint ' - to have an absorption of',100*ScreenAbsorption,'%'\n\tprint ' - to convert',100*ScreenConversion,'% of the incoming x-rays to visible light'\n\tprint ' - and to have an emmittance of',100*ScreenAbsorption,'% of all converted photons'\n\tprint 'we have a total efficiency of the screen of ',100*ScreenOutput,'%.'\n\n\tprint\n\tprint 'One incoming',options.InputEnergy/1000,'keV x-ray photon will thus produce:'\n\tprint ' -',int(round(PhotonsAfterScintillator)),'photons with a wavelength'\\\n\t\t'of',int(Wavelength*1e9),'nm (or',round(PhotonEnergyeV,3),'eV).'\n\n\tprint ' -',int(round(PhotonsAfterLens)),'of these photons (' +\\\n\t\tstr(LensTransmission*100) + ' %) will arrive at the sensor'\n\n\tprint ' - which will produce',int(round(ProducedElectrons)),'electrons',\\\n\t\t'on a sensor with a QE of',QESensor\n\t\t\n\tprint 'To achieve',options.LinePairs,'lp/mm on a',options.FOV,\\\n 'cm scintillator, we need a sensor with',round(int(PixelsNeeded)**2/1e6,1),\\\n\t\t'Mpx (' + str(int(PixelsNeeded)) + 'x' + str(int(PixelsNeeded)),\\\n 'px), which results in pixels with a physical size of',\\\n\t\tround(SensorPixelSize*1000,2),'um on a',options.SensorSize,'cm sensor.'\n\n\tprint 'For the chosen optical configuration of:'\n\tprint ' - FOV =','%.2f' % options.FOV,'cm and'\n\tprint ' - Opening angle =','%.2f' % options.OpeningAngle + 'deg we get a'\n\tprint ' - Working distance of','%.2f' % WorkingDistance,'cm'\n\t\n\tprint\n\tprint 'Numerical Aperture:'\n\tprint ' - calculated NA:',NumericalApertureCalculated,'(central element in scintillator layer of FPD)'\n\tprint ' - average NA:',NumericalApertureAverage,'(average NA on optical axis assuming 10 um distance between scintillator and detector)'\n\tprint ' - NA JBAG lenses:',NumericalApertureJBAG,'(assuming F=1/2NA -> NA = 1/2F, with F =',FStopJBAG,')'\n\tprint ' - NA for our sensor:',NumericalApertureDetermined,'(according to Rene = SensorDistance / ( FStop * 2 * SensorDistance / Magnification )'\n\t\n\tsys.stdout=old_stdout # here we restore the default behavior\n\tlogfile.close() # do not forget to close your file\n\n\tprint 'Logfile saved to ' + os.path.join(Prefix,''.join([SaveName,'.txt']))\n\tprint\nelse:\n\tplt.show()\n\t\nprint 'The options were:'\nprint str(options).replace('{','').replace('}','').replace(\"'\",'').replace(', ','\\n') # getting the output of 'options' and doing some string-replacement to get a nice filename for the output.\n\nprint 80 * '_'\nprint 'Call the script with the commandline below to get the same result...'\nprint ' '.join(sys.argv)\n\nif options.Output:\n print\n print 'use the command below to open all the generated .png files with Fiji'\n viewcommand = '/home/scratch/Apps/Fiji.app/fiji-linux -eval \\'run(\"Image',\\\n 'Sequence...\", \"open=' + os.getcwd() + ' starting=1 increment=1',\\\n 'scale=100 file=png or=[] 
sort\");\\' &'\n print viewcommand\n print 80 * '_'\n\n\"\"\"\n# kill all runnig fiji jobs\nkillall fiji-linux;\n# remove all calculated images\nrm *.png;\n# calculate some stuff\nfor f in {10..43..15}; # Field of View\ndo echo FOV $f;\nfor o in {10..150..15}; # Opening Angle\ndo echo OpeningAngle $o;\nfor s in {5..25..15}; # Sensor Size\ndo echo SensorSize $s;\n./CalculateDetector.py -f $f -o $o -s $s -p;\ndone;\ndone;\ndone\n# open fiji\n/home/scratch/Apps/Fiji.app/fiji-linux -eval 'run(\"Image Sequence...\", \"open=/afs/psi.ch/project/EssentialMed/Dev starting=1 increment=1 scale=100 file=png or=[] sort\");' & # start fiji\n#\n\"\"\"\n" }, { "alpha_fraction": 0.6725873351097107, "alphanum_fraction": 0.689757227897644, "avg_line_length": 27.149999618530273, "blob_id": "5bc6c1cf2e63b4c51ebaecfa3bf19323b3bc325d", "content_id": "ed3998f876097a4803e53be90c2a128d6a054e85", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1689, "license_type": "permissive", "max_line_length": 115, "num_lines": 60, "path": "/elphel/GPIO_input.py", "repo_name": "yongkaka/GlobalDiagnostiX", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# coding=utf8\n\n# Script to work with the Input/Output Pins of the RPi\n# Ultimately thought to trigger the Elphel camera\n# Based on http://code.google.com/p/raspberry-gpio-python/\n\nimport sys\nimport time\n# Try to import the GPIO library\ntry:\n\timport RPi.GPIO as GPIO\nexcept:\n\tprint 'I cannot import RPI.GPIO, you have to run the script as root'\n\tprint 'try running it again with'\n\tprint '---'\n\tprint 'sudo',' '.join(sys.argv) # joining the sys.argv list to a string so we can print it\n\tprint '---'\n\tsys.exit(1)\n\ndef is_even(i):\n\treturn (i % 2) == 0\n\n# to use Raspberry Pi board pin numbers\nGPIO.setmode(GPIO.BOARD) # Named sequentially, as seen on the connector. compare http://elinux.org/File:GPIOs.png\n# GPIO.setmode(GPIO.BCM) # Named GPIO*, see table http://is.gd/xWDsp7 (e.g. 
007 is the last pin)\n\nprint 'set up GPIO input channel'\nPin = 26 # BOARD\n#~ Pin = 007 # BMC\nGPIO.setup(Pin, GPIO.IN)\n\nprint\nprint 'I am waiting for you to connect pin', Pin, 'and ground'\nprint\n\n# Wait for Input, then print something and wait for a short while\n# Code according to http://is.gd/G88UyN\ncounter = 1\nPrevious_Reading = 0\nwhile True:\n\tif GPIO.input(Pin):\n\t\tprint \"Pin\", Pin, \"and Ground are connected (\" + str(counter),\\\n\t\t\t \"times).\"\n\t\tcounter += 1\n\t\ttime.sleep(0.05)\n\n#~ Counter = 1\n#~ Previous_Input = 0\n#~ while Counter < 100:\n\t#~ Input = GPIO.input(Pin)\n\t#~ if not Previous_Input and Input:\n\t\t#~ print \"Pin\", Pin, \"and Ground are connected (\" + str(Counter),\\\n\t\t\t #~ \"times).\"\n\t\t#~ Counter += 1\n\t#~ Previous_Input = Input\n\t#~ time.sleep(0.1)\n\n# Reset every channel that has been set up by this program to INPUT with no pullup/pulldown and no event detection.\nGPIO.cleanup()\n" }, { "alpha_fraction": 0.7369052171707153, "alphanum_fraction": 0.7516993284225464, "avg_line_length": 58.5476188659668, "blob_id": "dfc5e2ba2a945390ca396de34855ec33487b9576", "content_id": "ec3467e683d8b34ecfce046fb6cde098588e9cb3", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2501, "license_type": "permissive", "max_line_length": 238, "num_lines": 42, "path": "/aptina/setup.md", "repo_name": "yongkaka/GlobalDiagnostiX", "src_encoding": "UTF-8", "text": "# How to setup DevWareX on Ubuntu 12.04\n\n## Install Python3\n- Follow the [instructions] from Aptina on their Atalassian-page, namely\n\t- `wget http://www.python.org/ftp/python/3.3.2/Python-3.3.2.tar.xz`\n\t- `tar -xvf Python-3.3.2.tar.xz`\n\t- `cd Python-3.3.2`\n\t- `./configure --enable-shared --prefix=/usr && make && make install`\n\t- `sudo ln -s /Library/Frameworks/Python.framework/Versions/3.3/Python /usr/lib/libpython3.3m.dylib`\n\n# Install DevWare\n- `sudo apt-get install libtbb-dev` to install a necessary [library]\n- Download a recent version from the [DevSuite]-website, either manually or with this command, which downloads Version 1.4 for Linux32, unpacks it and starts the installation:\n `wget https://aptina.atlassian.net/wiki/download/attachments/11501573/DevWareX_linux32_1_4.tar;tar -xvf DevWare*.tar;./Developer`\n\n# Get sensor and board files\n## Very Easy\n- Get the files from your Aptina representative, save them to the `data` directory.\n\n## Easy\nSince you've probably already checked out the [GlobalDiagnostiX repository][GDXrepo] or are working at PSI, you can just symlink the necessary files.\n- `cd` into the `data` directory inside the directory were you installed the DevSuite.\n- `ln -s /afs/psi.ch/project/EssentialMed/Dev/aptina/data/* .`\n\n## Harder\nYou can only check out the files you need from the [GlobaldiagnostiX repository][GDXrepo]. 
Although you probably want to go the *Very Easy* or *Easy route*...\n- `cd` into the directory were you installed the DevSuite.\n- `rm -r data` to remove the original `data` directory.\n- `git init data;cd data` to make a new Git repository.\n- `git remote add -f origin [email protected]:habi/GlobalDiagnostiX.git` to add the original repository as remote.\n- `git config core.sparsecheckout true;mkdir .git/info` to enable sparse checkout\n- `echo aptina/data/apps_data/ >> .git/info/sparse-checkout;echo aptina/data/board_data/ >> .git/info/sparse-checkout;echo aptina/data/sensor_data/ >> .git/info/sparse-checkout` to add the necessary files to the desired files to checkout.\n- `git pull origin master` to get them\n- `mv aptina/data/* .;rm -r aptina` to remove some cruft \n\n# Start the DevSuite\n- `cd PATH_TO_DEVSUITE` and start it with `./DevWareX.exe`\n\n[instructions]: https://aptina.atlassian.net/wiki/display/DEVS/DevWareX+Installation+Instructions+-+Linux\n[library]: http://packages.ubuntu.com/precise/libtbb-dev\n[DevSuite]: https://aptina.atlassian.net/wiki/display/DEVS/Software+Downloads\n[GDXrepo]: https://github.com/habi/GlobalDiagnostiX\n" }, { "alpha_fraction": 0.6222345232963562, "alphanum_fraction": 0.6548672318458557, "avg_line_length": 39.17777633666992, "blob_id": "aa7c90c933e649ad541126eba5c520767778c000", "content_id": "7873f7f21afb3ac1fd567364f0e14a18b6aec486", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1808, "license_type": "permissive", "max_line_length": 74, "num_lines": 45, "path": "/PlotXraySpectra.py", "repo_name": "yongkaka/GlobalDiagnostiX", "src_encoding": "UTF-8", "text": "from pylab import *\nimport os\nimport scipy\nfrom scipy.integrate import trapz\n\n# http://stackoverflow.com/a/11249430/323100\nSpectrapath = '/afs/psi.ch/project/EssentialMed/Dev/Spectra'\nSpectra = [\n (os.path.join(Spectrapath, 'Xray-Spectrum_040kV.txt')),\n (os.path.join(Spectrapath, 'Xray-Spectrum_046kV.txt')),\n (os.path.join(Spectrapath, 'Xray-Spectrum_053kV.txt')),\n (os.path.join(Spectrapath, 'Xray-Spectrum_060kV.txt')),\n (os.path.join(Spectrapath, 'Xray-Spectrum_070kV.txt')),\n (os.path.join(Spectrapath, 'Xray-Spectrum_080kV.txt')),\n (os.path.join(Spectrapath, 'Xray-Spectrum_090kV.txt')),\n (os.path.join(Spectrapath, 'Xray-Spectrum_100kV.txt')),\n (os.path.join(Spectrapath, 'Xray-Spectrum_100kV.txt')),\n (os.path.join(Spectrapath, 'Xray-Spectrum_120kV.txt'))]\n\nData = [(np.loadtxt(FileName)) for FileName in Spectra]\nEnergy = [int(open(FileName).readlines()[2].split()[4])\n for FileName in Spectra]\nMean = [np.round(double(open(FileName).readlines()[5].split()[3]),\n decimals=2) for FileName in Spectra]\n\nfor i in range(len(Spectra)):\n plt.plot(Data[i][:, 0], Data[i][:, 1],\n label=str(Energy[i]) + 'kV, Mean=' + str(Mean[i]) + 'keV')\n\nplt.legend(loc='best')\nplt.title('X-ray spectra')\nplt.xlabel('Energy [kV]')\nplt.ylabel('Photons')\nplt.savefig('plot.pdf')\n\nfor WhichOneShallWeIntegrate in range(len(Spectra)):\n #~ plt.figure()\n #~ plt.plot(Data[WhichOneShallWeIntegrate][:, 0],\n #~ Data[WhichOneShallWeIntegrate][:, 1])\n\n Integral = scipy.integrate.trapz(Data[WhichOneShallWeIntegrate][:, 1],\n Data[WhichOneShallWeIntegrate][:, 0])\n print 'The integral for', Energy[WhichOneShallWeIntegrate], 'kV is',\\\n str(round(Integral/1e6, 3)) + 'e6 photons'\n #~ plt.show()\n" }, { "alpha_fraction": 0.664176344871521, "alphanum_fraction": 0.6850952506065369, "avg_line_length": 26.597938537597656, "blob_id": 
"116ee5809963658dea7460a81fff09d8c3dd5349", "content_id": "746a73b47123c9e501ce2add388a35b41da37444", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2680, "license_type": "permissive", "max_line_length": 73, "num_lines": 97, "path": "/AngularOpening.py", "repo_name": "yongkaka/GlobalDiagnostiX", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python\n# coding: utf-8\n\nimport optparse\nimport sys\nimport numpy\nfrom pylab import *\nimport time\n\nion()\n\n# Use Pythons Optionparser to define and read the options, and also\n# give some help to the user\nparser = optparse.OptionParser()\nusage = \"usage: %prog [options] arg\"\nparser.add_option('-a', dest='Angle', type='float',\n\thelp='Angular view of the Objective',\n\tmetavar='53')\nparser.add_option('-f', dest='FOV', type='float',\n\tdefault = 43,\n\thelp='Desired field of view (square for the moment). Default = 43 cm',\n\tmetavar='43')\t\n(options, args) = parser.parse_args()\n\n# show the help if no parameters are given\nif options.Angle==None:\n\tparser.print_help()\n\tprint ''\n\tprint 'Example:'\n\tprint 'The command below shows the configuration of a detector with '\n\tprint 'an optics with an opening angle of 78° used to get a field'\n\tprint 'of view of 50 cm:'\n\tprint ''\n\tprint 'EssentialMed-Optics.py -a 78 -f 50'\n\tprint ''\n\tsys.exit(1)\nprint ''\n\n#~ tan(\\alpha/2) = (FOV/2) / Distance\n#~ Distance = (FOV/2)/tan(\\alpha/2)\n\nFOV = float(options.FOV)\nAngleDeg = float(options.Angle)\nAngleRad = numpy.deg2rad(AngleDeg)\nWorkingDistance = (FOV/2)/numpy.tan(AngleRad/2)\n\nprint 'The working distance for'\nprint 'a desired field of view of ' + str('%.2f' % FOV) + ' cm and '\nprint 'an opening angle of ' + str('%.2f' % AngleDeg) + '°'\nprint 'is ' + str('%.2f' % WorkingDistance) + ' cm'\n\n# Camera\nCamSize = 5.25\nrect = Rectangle((-CamSize, -(float(CamSize)/2)), CamSize, CamSize,\n\tfacecolor=\"#aaaaaa\")\ngca().add_patch(rect)\n\n# Angle\nfrom matplotlib.patches import Wedge\nwedgecolor = 'r'\nWedge = Wedge((0,0), WorkingDistance*.3, -(AngleDeg/2), (AngleDeg/2),\n\tfill=False, color=wedgecolor)\nplt.gca().add_patch(Wedge)\n\n# Beams\nbeamcolor = wedgecolor\nplt.plot([0,WorkingDistance],[0,FOV/2],beamcolor)\nplt.plot([0,WorkingDistance],[0,-FOV/2],beamcolor)\n\n# Screen\nscreencolor = 'k'\nplt.plot(\n\t[WorkingDistance,WorkingDistance],\n\t[(options.FOV/2),-(options.FOV/2)],linewidth='5',color=screencolor)\nplt.axhline(color=screencolor,linestyle='--')\n\nplt.axis('equal')\nplt.title('Angular opening: ' + str(options.Angle) + ', Screen size: ' +\\\n\tstr(options.FOV) + 'cm , Working Distance: ' +\\\n\tstr('%.2f' % WorkingDistance) + 'cm')\nplt.xlabel('Distance [cm]')\nplt.text(WorkingDistance*.3*numpy.cos(AngleRad), \n\tWorkingDistance*.3*numpy.sin(AngleRad),\n\tstr(options.Angle) + u'°') # http://is.gd/pxodor\nplt.axis([-5,85,0,0])\nplt.draw()\n\nSaveName = 'EssentialMed-Optics_angle_' +str(options.Angle) + '_wd_' +\\\n\tstr('%.2f' % WorkingDistance)\nFigureName = ''.join([SaveName,'.png'])\nsavefig(FigureName)\nprint 'Figure saved to ' + FigureName\n\nplt.show()\n#~ plt.draw()\n#~ time.sleep(1)\n#~ plt.close()\n" }, { "alpha_fraction": 0.6765819191932678, "alphanum_fraction": 0.6917252540588379, "avg_line_length": 29.816667556762695, "blob_id": "04a44d27a99d4f64d10b1065eac65a3bb2f64094", "content_id": "aae71ee3275a3f8e5f4cef00b25fe0983cad9ace", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 1849, "license_type": "permissive", "max_line_length": 115, "num_lines": 60, "path": "/elphel/GPIO_output.py", "repo_name": "yongkaka/GlobalDiagnostiX", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# coding=utf8\n\n# Script to work with the Input/Output Pins of the RPi\n# Ultimately thought to trigger the Elphel camera\n# Based on http://code.google.com/p/raspberry-gpio-python/\n\nimport sys\nimport time\n# Try to import the GPIO library\ntry:\n\timport RPi.GPIO as GPIO\nexcept:\n\tprint 'I cannot import RPI.GPIO, you have to run the script as root'\n\tprint 'try running it again with'\n\tprint '---'\n\tprint 'sudo',' '.join(sys.argv) # joining the sys.argv list to a string so we can print it\n\tprint '---'\n\tsys.exit(1)\n\ntry:\n\tPin = int(sys.argv[1])\n\tsleepytime = float(sys.argv[2])\n\tsteps = int(sys.argv[3])\nexcept:\t\n\tprint 'Start the script with three parameters'\n\tprint sys.argv[0],'Pin Sleeptime Repeats'\n\tsys.exit(1)\n\ndef is_even(i):\n\treturn (i % 2) == 0\n\n# to use Raspberry Pi board pin numbers\nGPIO.setmode(GPIO.BOARD) # Named sequentially, as seen on the connector. compare http://elinux.org/File:GPIOs.png\n# GPIO.setmode(GPIO.BCM) # Named GPIO*, see table http://is.gd/xWDsp7 (e.g. 007 is the last pin)\n\nprint 'set up GPIO output channel'\n# Pin = 26 # BOARD\n#~ Pin = 007 # BMC\nGPIO.setup(Pin, GPIO.OUT)\n\n# set RPi board pin selected above to high for a certain time, wait, set it low\n# lather, rinse, repeat for 'steps' steps\ntry:\n\tfor Iteration in range(steps):\n\t\tif is_even(Iteration):\n\t\t\tprint str(\"%.02d\" % (Iteration + 1)) + '/' + str(\"%.02d\" % (steps)),'| Pin',Pin,'^ for',sleepytime,'s'\n\t\t\tGPIO.output(Pin, GPIO.HIGH)\n\t\t\ttime.sleep(sleepytime)\n\t\telse:\n\t\t\tprint str(\"%.02d\" % (Iteration + 1)) + '/' + str(\"%.02d\" % (steps)),'| Pin',Pin,'v for',sleepytime,'s'\n\t\t\tGPIO.output(Pin, GPIO.LOW)\n\t\t\ttime.sleep(sleepytime)\nexcept KeyboardInterrupt:\n\tprint\n\tprint 'User aborted sequence, goodbye'\n\tpass\n\n# Reset every channel that has been set up by this program to INPUT with no pullup/pulldown and no event detection.\nGPIO.cleanup()\n" }, { "alpha_fraction": 0.706256628036499, "alphanum_fraction": 0.7232237458229065, "avg_line_length": 32.67856979370117, "blob_id": "f122d49ddfee1a42c8d916128a526396a09e35f4", "content_id": "7a1ad8560173ab9fe4b3bf17504d5074d554100e", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 943, "license_type": "permissive", "max_line_length": 101, "num_lines": 28, "path": "/elphel/elphel.py", "repo_name": "yongkaka/GlobalDiagnostiX", "src_encoding": "UTF-8", "text": "from optparse import OptionParser\nimport os\nimport urllib\nimport time\n\nparser = OptionParser()\nusage = 'usage: % prog [options] arg'\n\nparser.add_option('-i', dest='Images',help='how many images should I save?',metavar='1234',type=int) \n(options,args) = parser.parse_args()\n\n# Make a subdirectory to the current directory we're in\ntry:\n\tos.mkdir(os.path.join(os.getcwd(),'Elphel'))\nexcept:\n\tprint 'Elphel-directory already exists'\nSaveDir = os.path.join(os.getcwd(),'Elphel',str(time.time()))\nos.mkdir(SaveDir)\n\n# get options.Images number of images as fast as possible from the camera\nfor i in range(options.Images):\n\tprint 'writing image',i,'/',len(range(options.Images))\n\t# get the url of the camera which spit out an image\n\t# save the image to 'SaveDir', with an unique name based on the current 
time\n\turllib.urlretrieve(\"http://192.168.0.9:8081/wait/img\",\n\t\t\t\t\t os.path.join(SaveDir,str(time.time()) + '.jpg'))\n\nprint 'saved to',SaveDir\n" }, { "alpha_fraction": 0.6071922779083252, "alphanum_fraction": 0.6249423623085022, "avg_line_length": 38.08108139038086, "blob_id": "74645e3d6a3af321a92d3d43c6f409f7c115c0ea", "content_id": "056baac2ac71665bd3b04fd42c26460184702ae8", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8686, "license_type": "permissive", "max_line_length": 79, "num_lines": 222, "path": "/ReadCamera.py", "repo_name": "yongkaka/GlobalDiagnostiX", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nScript to read out the TIScamera using python.\n\"\"\"\n\nfrom optparse import OptionParser\nimport sys\nimport os\nimport subprocess\nimport time\nimport matplotlib.pylab as plt\n\n# Use Pythons Optionparser to define and read the options, and also\n# give some help to the user\nparser = OptionParser()\nusage = \"usage: %prog [options] arg1 arg2\"\nparser = OptionParser(usage=usage)\nparser.add_option(\"-c\", \"--camera\", dest=\"camera\",\n default=\"tis\", type='str', metavar='name',\n help=\"Camera to use; at the moment 'tis', 'aptina' and \"\n \"'awaiba', even when the two latter options are not \"\n \"implemented yet... (default: %default)\")\nparser.add_option(\"-e\", \"--exposure\", dest=\"exposuretime\",\n metavar='125', type='float',\n help=\"Exposure time [ms]\")\nparser.add_option(\"-f\", \"--framerate\", dest=\"framerate\",\n metavar='30', type='int',\n help=\"Framerate of the ffmpeg-process at the end\")\nparser.add_option(\"-i\", \"--images\", dest=\"images\",\n default=5, type=\"int\",\n help=\"How many images should ffmpeg save at the end? \"\n \"(default: %default)\")\nparser.add_option(\"-p\", \"--preview\", dest=\"preview\",\n action=\"store_true\", default=False,\n help=\"Preview image (default: %default)\")\nparser.add_option(\"-s\", \"--suffix\", dest=\"suffix\",\n type='str', metavar='Suffix',\n help=\"Suffix to add after the foldername\")\nparser.add_option(\"-v\", \"--verbose\", dest=\"verbose\",\n action=\"store_true\", default=False,\n help=\"Be chatty. 
(default: %default)\")\n(options, args) = parser.parse_args()\n\nif len(sys.argv[1:]) == 0:\n print \"You need to enter at least one option, here's the help\"\n parser.print_help()\n sys.exit()\n\nif not options.exposuretime:\n print 'You need to supply an exposure time we should use.'\n print 'Enter the command like so:'\n print ' ', ' '.join(sys.argv), \"-e exposuretime\"\n sys.exit()\n\nprint 80 * \"-\"\nprint \"Hey ho, let's go!\"\n\n# Check at which /dev/video we have a camera\nfor device in range(5):\n if os.path.exists('/dev/video' + str(device)):\n CameraPath = '/dev/video' + str(device)\n if options.verbose:\n print 'Found a camera on', CameraPath\n break\n else:\n if options.verbose:\n print 'Nothing found at /dev/video' + str(device)\n\nif options.verbose:\n print \"We are trying to work with the '\" + options.camera + \"' camera\"\n print\n print \"Getting available sizes\"\n\n# Get available output sizes of the currently connected camera using v4l2-ctl\nprocess = subprocess.Popen(['v4l2-ctl', '--device=' + CameraPath,\n '--list-formats-ext'], stdout=subprocess.PIPE)\noutput, error = process.communicate()\nwidth = []\nheight = []\nfor line in output.split(\"\\n\"):\n if line and line.split()[0].startswith(\"Size\"):\n width.append(int(line.split()[2].split(\"x\")[0]))\n height.append(int(line.split()[2].split(\"x\")[1]))\nfor size in range(len(width)):\n if options.verbose:\n print \" *\", width[size], \"x\", height[size], \"px\"\nif options.camera == \"tis\":\n CMOSwidth = max(width)\n CMOSheight = max(height)\nelif options.camera == 'aptina':\n CMOSwidth = 123\n CMOSheight = 456\nelif options.camera == 'awaiba':\n CMOSwidth = 123\n CMOSheight = 456\nprint \"We are using a\", CMOSwidth, \"x\", CMOSheight, \"px detector size to\",\\\n \"proceed.\"\n\n#~ Set exposure time\n#~ According to http://goo.gl/D8MHsW and http://is.gd/zaxWn7, the exposure time\n#~ is set in \"100 µs units, where the value 1 stands for 1/10000th of a second,\n#~ 10000 for 1 second [...]\". The user sets the exposure time in ms (1000 µs)\n#~ 1 s = 10⁶ µs = 10⁴ units -> 1000 ms = 10⁴ units. 
From ms to units -> * 10\nif options.verbose:\n print 'The desired exposure time is', options.exposuretime, 'ms',\nelse:\n print 'Setting exposure time to', options.exposuretime, 'ms'\noptions.exposuretime = options.exposuretime * 10\nif options.verbose:\n print '(corresponding to', int(options.exposuretime), '\"100 µs units\").'\n\nif options.verbose:\n process = subprocess.Popen(['v4l2-ctl', '--device=' + CameraPath, '-L'],\n stdout=subprocess.PIPE)\n output, error = process.communicate()\n for line in output.split(\"\\n\"):\n if line and line.split()[0].startswith(\"exp\"):\n print \"The camera was set from an exposure time of\",\\\n line.split(\"=\")[-1], \"units\",\n\n#~ Use 'v4l2-ctl -c exposure_absolute=time' to set exposure time\nprocess = subprocess.Popen([\"v4l2-ctl\", '--device=' + CameraPath,\n \"-c\", \"exposure_absolute=\" +\n str(options.exposuretime)], stdout=subprocess.PIPE)\nif options.verbose:\n process = subprocess.Popen(['v4l2-ctl', '--device=' + CameraPath, '-L'],\n stdout=subprocess.PIPE)\n output, error = process.communicate()\n for line in output.split(\"\\n\"):\n if line and line.split()[0].startswith(\"exp\"):\n print \"to\", line.split(\"=\")[-1], \"units.\"\n\n# Construct a general NULL pointer, used for the subprocesses\nDEVNULL = open(os.devnull, 'w')\n# Show the stream if desired\nif options.preview:\n # Setting preview to 720p, since bigger doesn't work with mplayer\n previewwidth = 1280\n previewheight = 720\n print \"I'm now showing you a\", previewwidth, \"x\", previewheight, \"px\",\\\n \"preview image from the upper left corner of the sensor.\"\n # mplayer command based on TIScamera page: http://is.gd/5mJEM7\n mplayercommand = \"mplayer tv:// -tv width=\" + str(previewwidth) +\\\n \":device=\" + CameraPath + \" -geometry 50%:50% -title 'Previewing\" +\\\n \" top left edge (\" + str(previewwidth) + \"x\" + str(previewwidth) +\\\n \" px), with an exposure time of \" + str(options.exposuretime / 10) +\\\n \" ms' -nosound\"\n if options.verbose:\n print 'Previewing images with'\n print\n print mplayercommand\n print\n print \"Exit with pressing the 'q' key!\"\n subprocess.call(mplayercommand, stdout=DEVNULL, stderr=subprocess.STDOUT,\n shell=True)\n\n# Save output to a file, load that and display it.\n# We save option.images images, since we often demand an image from the camera\n# while it is in the middle of a circle, thus it's a corrupted image...\n\n\n# Construct path\nFileSavePath = os.path.join('Images', options.camera, str(int(time.time())))\nif options.suffix:\n FileSavePath += '_' + str(options.suffix)\nif options.exposuretime:\n # Go back from 100 us units to real time\n FileSavePath += '_' + str(options.exposuretime / 10) + 'ms'\nif options.framerate:\n FileSavePath += '_' + str(options.framerate) + 'fps'\ntry:\n # Generating necessary directories\n os.makedirs(FileSavePath)\nexcept:\n print FileSavePath, 'cannot be generated'\n sys.exit(1)\n\n# ffmpeg command based on http://askubuntu.com/a/102774\nprint \"Getting\", options.images, \"images from the camera\"\n# Hz = int(round(1 / (options.exposuretime / 10 / 1000)))\nffmpegcommand = \"ffmpeg -f video4linux2 -s \" + str(CMOSwidth) + \"x\" +\\\n str(CMOSheight) + \" -i \" + CameraPath + \" -vframes \" +\\\n str(options.images) + \" \"\nif options.framerate:\n ffmpegcommand += \"-r \" + str(options.framerate) + \" \"\nffmpegcommand += FileSavePath + \"/snapshot_%03d.jpg\"\nif options.verbose:\n print 'Saving images with'\n print\n print ffmpegcommand\n print\nt0 = 
time.time()\nsubprocess.call(ffmpegcommand, stdout=DEVNULL, stderr=subprocess.STDOUT,\n shell=True)\nt1 = time.time()\nprint \"in\", str(round(t1 - t0, 3)), \"seconds (\" +\\\n str(round(options.images / (t1-t0), 3)) + \" images per second)\"\n\nfilename = os.path.join(FileSavePath,\n \"snapshot_%03d\" % (int(round(options.images / 2.0))) + \".jpg\")\n\nimage = plt.imread(filename)\nplt.imshow(image, origin=\"lower\")\nfiguretitle = \"Snapshot\", str(int(round(options.images / 2.0))), \"of\",\\\n str(options.images), \"from\", FileSavePath, \"\\nwith an exposure time of\",\\\n str(options.exposuretime / 10), \"ms\",\nif options.preview:\n plt.axhspan(ymin=CMOSheight-previewheight, ymax=CMOSheight,\n xmin=0, xmax=float(previewwidth)/CMOSwidth,\n facecolor='r', alpha=0.5)\n plt.xlim([0, CMOSwidth])\n plt.ylim([0, CMOSheight])\n figuretitle += \"\\nred=preview area\",\nplt.title(' '.join(figuretitle))\nplt.show()\n\nprint 'Images saved to',\nprint os.path.abspath(os.path.join(FileSavePath, 'snapshot*.jpg'))\nprint 80 * \"-\"\nprint \"done\"\n" } ]
12
gedamia/clinic-income
https://github.com/gedamia/clinic-income
6698def3033a92b2fe10ee6641151edddf645704
abc3b3b6c10c1f1bea7f8917624f83f43b9ae963
3fec54c877e07c2c7335d1476c02e3588d3ca8ac
refs/heads/master
2021-04-27T08:12:40.256131
2018-04-03T22:22:00
2018-04-03T22:22:00
122,650,889
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5721703171730042, "alphanum_fraction": 0.5854101777076721, "avg_line_length": 29.57377052307129, "blob_id": "73b489533ab2e886aad33af939ba802e0c22f754", "content_id": "07ef5c262d9227e72628c5b9b125e63a0108967d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3852, "license_type": "no_license", "max_line_length": 96, "num_lines": 122, "path": "/create_patients.py", "repo_name": "gedamia/clinic-income", "src_encoding": "UTF-8", "text": "import xlrd\r\nimport openpyxl\r\nimport sys\r\nimport os\r\nimport os.path\r\nimport re\r\nimport time\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.reader.excel import load_workbook\r\nfrom openpyxl.workbook import Workbook\r\nfrom openpyxl.styles import Color, Fill\r\nfrom openpyxl.cell import Cell\r\nimport string\r\n\r\nimport shutil\r\n\r\nimport logging\r\nimport logging.handlers \r\n'''logging.basicConfig(level=logging.DEBUG,\r\n format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\r\n datefmt='%a, %d %b %Y %H:%M:%S',\r\n filename='myapp.log',\r\n filemode='w')'''\r\n\r\nfrom openpyxl.styles import numbers, is_date_format\r\n\r\nINCOME_PATH = r\"C:\\Evelyn\\income\"\r\nREPORT_PATH = r\"C:\\Evelyn\\report\\2018 report\"\r\nLOG_FILE = 'claims_log.txt'\r\nREPORT_KIND_ACU = \"ACU\"\r\nREPORT_KIND_CHIRO = \"CHIRO\"\r\nREPORT_KIND_PT = \"PT\"\r\nREPORT_KIND_ALL= \"ALL\"\r\nCHARTS_PATH=r\"C:\\Evelyn\\charts\\lexington\"\r\n\r\ndef data_collection(data_path): \r\n for filename in os.listdir(data_path):\r\n fp = os.path.join(data_path, filename)\r\n if os.path.isfile(fp):\r\n file_name_ar = fp.split(\"\\\\\", fp.count(\"\\\\\"))\r\n patient_name = file_name_ar[4]\r\n patient_name = patient_name[0:-5]\r\n patient_name = patient_name.rstrip()\r\n patient_name = patient_name.lstrip(\",\")\r\n patient_folder_path = os.path.join(CHARTS_PATH, patient_name)\r\n if os.path.exists(patient_folder_path):\r\n shutil.rmtree(patient_folder_path)\r\n #os.mkdir(patient_folder_path)\r\n shutil.copytree(\"patient_name\", patient_folder_path) \r\n #open(patient_intakeForm_path, \"wb\").write(open(\"patient_name\", \"rb\").read())\r\n \r\n else:\r\n data_collection(fp)\r\n\r\ndef date_compare(year0,mon0,day0, year1,mon1,day1):\r\n year0 = int(year0)\r\n mon0 = int(mon0)\r\n day0 = int(day0)\r\n year1 = int(year1)\r\n mon1 = int(mon1)\r\n day1 = int(day1)\r\n \r\n \r\n if year0 > year1: \r\n return True\r\n elif year0 == year1 :\r\n if mon0 > mon1:\r\n return True\r\n elif mon0 == mon1:\r\n if day0 >= day1:\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False\r\n else :\r\n return False\r\n \r\n#begin_date & end_date format [yyyymmdd]\r\ndef doing_financial_report(begin_date, end_date, report_kind = REPORT_KIND_ALL):\r\n acu_report_name = REPORT_PATH+\"\\\\\"+begin_date+\"-\"+end_date+\"-\"+REPORT_KIND_ACU+\".xlsx\"\r\n chiro_report_name = REPORT_PATH+\"\\\\\"+begin_date+\"-\"+end_date+\"-\"+REPORT_KIND_CHIRO+\".xlsx\"\r\n pt_report_name = REPORT_PATH+\"\\\\\"+begin_date+\"-\"+end_date+\"-\"+REPORT_KIND_PT+\".xlsx\"\r\n if os.path.exists(acu_report_name):\r\n os.remove(acu_report_name)\r\n if os.path.exists(chiro_report_name):\r\n os.remove(chiro_report_name)\r\n if os.path.exists(pt_report_name):\r\n os.remove(pt_report_name)\r\n #acu\r\n acu_wb = openpyxl.Workbook()\r\n acu_ws = acu_wb.active\r\n #chiro\r\n chiro_wb = openpyxl.Workbook()\r\n chiro_ws = chiro_wb.active\r\n #pt\r\n pt_wb = openpyxl.Workbook()\r\n pt_ws = pt_wb.active\r\n \r\n 
data_collection(ACCOUNTING_PATH, begin_date, end_date, acu_ws, chiro_ws, pt_ws, report_kind)\r\n\r\n if REPORT_KIND_ACU == report_kind:\r\n #acu\r\n acu_wb.save(acu_report_name)\r\n elif REPORT_KIND_CHIRO == report_kind:\r\n #chiro\r\n chiro_wb.save(chiro_report_name)\r\n elif REPORT_KIND_PT == report_kind:\r\n #pt\r\n pt_wb.save(pt_report_name)\r\n else:\r\n #acu\r\n acu_wb.save(acu_report_name)\r\n #chiro\r\n chiro_wb.save(chiro_report_name)\r\n #pt\r\n pt_wb.save(pt_report_name)\r\n \r\n \r\n\r\n#doing_financial_report(\"20180224\",\"20180226\")\r\ndata_collection(INCOME_PATH)\r\n" }, { "alpha_fraction": 0.5250566005706787, "alphanum_fraction": 0.5361299514770508, "avg_line_length": 39.0631217956543, "blob_id": "24a3b132e27dbfd6afb674daf23ea91bccab661b", "content_id": "6fd89724aee5b25bcb4be9991b1d9faa2c747b95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12372, "license_type": "no_license", "max_line_length": 132, "num_lines": 301, "path": "/eob_pt_aetna.py", "repo_name": "gedamia/clinic-income", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\n#encoding: utf-8\r\n\r\nimport importlib\r\nimport sys\r\nimport random\r\nfrom urllib.request import urlopen\r\nfrom urllib.request import Request\r\nimport string\r\nimport os\r\nimport re\r\nimport xlrd\r\nimport openpyxl\r\nimport sys\r\nimport os\r\nimport re\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.reader.excel import load_workbook\r\nfrom openpyxl.workbook import Workbook\r\nfrom openpyxl.styles import Color, Fill\r\nfrom openpyxl.cell import Cell\r\nfrom openpyxl.styles import numbers, is_date_format\r\nimport logging\r\nimport logging.handlers\r\nimport time\r\n\r\nLOG_FILE = 'claims_log.txt'\r\nlogging.basicConfig(level=logging.DEBUG,\r\n format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\r\n datefmt='%a, %d %b %Y %H:%M:%S',\r\n filename='myapp.log',\r\n filemode='w')\r\n\r\nAETNA_ACU_PATH = r'C:\\Evelyn\\function\\need to do report\\eob\\acu\\Aetna\\aetna_acu_eob.xlsx'\r\nAETNA_PT_PATH = r'C:\\Evelyn\\function\\need to do eob\\pt\\eob_aetna_pt.xlsx'\r\nCIGNA_ACU_PATH = r'C:\\Evelyn\\function\\need to do report\\eob\\acu\\Cigna\\cigna_acu_eob.xlsx'\r\nUHC_ACU_PATH = r'C:\\Evelyn\\function\\need to do report\\eob\\acu\\UHC\\uhc_acu_eob.xlsx'\r\n\r\nINCOME_PATH_TEST = r\"C:\\Evelyn\\income\"\r\nNO_PAY_LIST = r\"C:\\Evelyn\\function\\need to do report\\no_pay_list.txt\"\r\n\r\nAETNA_ACU_NAME_TAG = \"Patient Name: \"\r\nAETNA_ACU_DOS_TAG = \"DATES CODE \"\r\nAETNA_ACU_CHARGE_TAG = \"TOTALS \"\r\nAETNA_ACU_PAID_TAG = \"ISSUED AMT: \"\r\nAETNA_ACU_NO_PAY_FLAG = \"NO PAY\"\r\nAETNA_ACU_TAG = \"AETNA_ACU_PTN\"\r\n\r\nAETNA_PT_NAME_TAG = \"Member Name:\"\r\nAETNA_PT_DOS_TAG = \"DOS PL \"\r\nAETNA_PT_CHARGE_TAG = \"Totals: \"\r\nAETNA_PT_PAID_TAG = \"Paid Amount \"\r\nAETNA_PT_NO_PAY_FLAG = \"$0.00\"\r\nAETNA_PT_TAG = \"AETNA_PT_PTN\"\r\n\r\nCIGNA_ACU_NAME_TAG = \"PATIENT NAME: \"\r\nCIGNA_ACU_DOS_TAG = \"TOTAL \"\r\nCIGNA_ACU_CHARGE_TAG = \"TOTAL \"\r\nCIGNA_ACU_PAID_TAG = \"PAYMENT OF\"\r\nCIGNA_ACU_NO_PAY_FLAG = \"$0.00\"\r\nCIGNA_ACU_TAG = \"CIGNA_ACU_PTN\"\r\n\r\nUHC_ACU_NAME_TAG = \"PATIENT: \"\r\nUHC_ACU_DOS_TAG = \"PATIENT: \"\r\nUHC_ACU_CHARGE_TAG = \"SUBTOTAL\"\r\nUHC_ACU_PAID_TAG = \"SUBTOTAL\"\r\nUHC_ACU_TAG = \"UHC_ACU_PTN\"\r\n\r\n\r\n\r\ndef searching( path, last_name, first_name, charge, dos_d, dos_m, dos_y, paid, ded):\r\n for filename in os.listdir(path):\r\n fp = os.path.join(path, filename)\r\n if not 
os.path.isfile(fp):\r\n searching( fp, last_name, first_name, charge, dos_d, dos_m, dos_y, paid, ded)\r\n else:\r\n if bool( re.search(last_name, filename, re.IGNORECASE) and re.search(first_name, filename, re.IGNORECASE) ):\r\n filling( fp, charge, dos_d, dos_m, dos_y, paid, ded)\r\n return\r\n else:\r\n continue\r\n no_pay_list = open(NO_PAY_LIST, 'a')\r\n no_pay_list.write(last_name+\",\"+first_name+\" -----------------------------------\\n\\n\")\r\n no_pay_list.close()\r\n return\r\n\r\ndef filling( path, charge, dos_d, dos_m, dos_y, paid, ded):\r\n \r\n data = openpyxl.load_workbook(path)\r\n sheet_names = data.get_sheet_names()\r\n sheet0 = data.get_sheet_by_name(sheet_names[0])\r\n for i in range(1,sheet0.max_row +1):\r\n data_of_service = str(sheet0.cell(row=i, column=3).value)\r\n charge_amt = sheet0.cell(row=i, column=7).value\r\n dos_date = data_of_service.split(\" \")[0].split(\"-\")\r\n \r\n if len(dos_date) > 2:\r\n dos_year = (data_of_service.split(\" \"))[0].split(\"-\")[0]\r\n dos_month = (data_of_service.split(\" \"))[0].split(\"-\")[1]\r\n dos_day = (data_of_service.split(\" \"))[0].split(\"-\")[2]\r\n \r\n if( (dos_day == dos_d) and (dos_year[-2:] == dos_y) and (dos_month == dos_m) and int(charge) == int(charge_amt) ):\r\n sheet0.cell(row=i, column=10).value = time.strftime(\"%m/%d/%Y\")\r\n sheet0.cell(row=i, column=11).value = paid\r\n sheet0.cell(row=i, column=12).value = \"payspan\"\r\n if ded > 0:\r\n sheet0.cell(row=i, column=14).value = \"ded\"+str(ded)\r\n \r\n if (AETNA_ACU_NO_PAY_FLAG == paid) or (AETNA_PT_NO_PAY_FLAG == paid):\r\n \r\n #no_pay_list.write(\"NO PAY ------------------------ \"+path+\",\"+dos_m+\"/\"+dos_d+\"/\"+dos_y+\",\"+str(charge)+\"\\n\\n\")\r\n print(\"no pay\"+path+\",\"+dos_m+\"/\"+dos_d+\"/\"+dos_y+\",\"+str(charge) +\",\"+str(ded)+\"\\n\\n\")\r\n break\r\n data.save(path)\r\n \r\ndef parsing_aetna_acu(income_path, eob_path):\r\n first_name = \"\"\r\n last_name = \"\"\r\n charge = 0\r\n dos_d = \"\"\r\n dos_m = \"\"\r\n dos_y = \"\"\r\n paid = 0\r\n eob_data = xlrd.open_workbook(eob_path)\r\n eob_table = eob_data.sheets()[0]\r\n for i in range(eob_table.nrows):\r\n data_value = str(eob_table.cell(i,0).value)\r\n if data_value.startswith(AETNA_ACU_NAME_TAG): #name\r\n patient_name = data_value.split(\":\", data_value.count(\":\"))\r\n patient_name = patient_name[1].split(\"(\", patient_name[1].count(\"(\"))\r\n patient_name = patient_name[0].split(\" \", patient_name[0].count(\" \"))\r\n first_name = patient_name[1]\r\n last_name = patient_name[-2]\r\n elif data_value.startswith(AETNA_ACU_DOS_TAG): #dos\r\n data_value = str(eob_table.cell(i+1,0).value)\r\n dos = data_value.split(\" \", data_value.count(\" \"))\r\n if len(dos) > 2:\r\n dos = str(dos[0]).split(\"/\", str(dos[0]).count(\"/\"))\r\n dos_d = dos[1]\r\n dos_m = dos[0]\r\n dos_y = dos[2]\r\n elif data_value.startswith(AETNA_ACU_CHARGE_TAG): #charge\r\n charge = data_value.split(\" \", data_value.count(\" \"))\r\n charge = charge[1].replace(\",\",\"\") #cool method!\r\n charge = float(charge)\r\n elif data_value.startswith(AETNA_ACU_PAID_TAG): #paid\r\n paid = data_value.split(\":\", data_value.count(\":\"))\r\n #begin to write into patients' claims\r\n searching( income_path, last_name, first_name, charge, dos_d, dos_m, dos_y, paid[1][1:])\r\n #end to write into patients' claims\r\n else:\r\n continue\r\n\r\ndef parsing_aetna_pt(income_path, eob_path):\r\n first_name = \"\"\r\n last_name = \"\"\r\n charge = 0\r\n dos_d = \"\"\r\n dos_m = \"\"\r\n dos_y = \"\"\r\n paid = 0\r\n 
eob_data = xlrd.open_workbook(eob_path)\r\n eob_table = eob_data.sheets()[0]\r\n for i in range(eob_table.nrows):\r\n data_value = str(eob_table.cell(i,0).value)\r\n if data_value.startswith(AETNA_PT_NAME_TAG): #name\r\n patient_name = data_value.split(\":\", data_value.count(\":\"))\r\n patient_name = patient_name[1].split(\" Product Type: \", patient_name[1].count(\" Product Type: \"))\r\n patient_name = patient_name[0].split(\" \", patient_name[0].count(\" \"))\r\n first_name = patient_name[1]\r\n last_name = patient_name[-3]\r\n elif data_value.startswith(AETNA_PT_DOS_TAG): #dos\r\n data_value = str(eob_table.cell(i+1,0).value)\r\n dos = data_value.split(\" \", data_value.count(\" \"))\r\n if len(dos) > 2:\r\n dos = str(dos[0]).split(\"/\", str(dos[0]).count(\"/\"))\r\n dos_d = dos[1]\r\n dos_m = dos[0]\r\n dos_y = dos[2][-2:]\r\n elif data_value.startswith(AETNA_PT_CHARGE_TAG): #charge\r\n charge = data_value.split(\" \", data_value.count(\" \"))\r\n deductable = charge[4].replace(\",\",\"\") #cool method!\r\n deductable = deductable.replace(\"(\",\"\")\r\n deductable = deductable.replace(\")\",\"\")\r\n deductable = float(deductable[1:])\r\n\r\n\r\n \r\n charge = charge[1].replace(\",\",\"\") #cool method!\r\n charge = charge.replace(\"(\",\"\")\r\n charge = charge.replace(\")\",\"\")\r\n charge = float(charge[1:])\r\n\r\n elif data_value.startswith(AETNA_PT_PAID_TAG): #paid\r\n paid = data_value.split(\" \", data_value.count(\" \"))\r\n #begin to write into patients' claims\r\n searching( income_path, last_name, first_name, charge, dos_d, dos_m, dos_y, paid[2], deductable)\r\n #end to write into patients' claims\r\n else:\r\n continue\r\n return\r\ndef parsing_cigna_acu(income_path, eob_path):\r\n first_name = \"\"\r\n last_name = \"\"\r\n charge = 0\r\n dos_d = \"\"\r\n dos_m = \"\"\r\n dos_y = \"\"\r\n paid = 0\r\n eob_data = xlrd.open_workbook(eob_path)\r\n eob_table = eob_data.sheets()[0]\r\n for i in range(eob_table.nrows):\r\n data_value = str(eob_table.cell(i,0).value)\r\n if data_value.startswith(CIGNA_ACU_NAME_TAG): #name\r\n patient_name = data_value.split(\":\", data_value.count(\":\"))\r\n patient_name = patient_name[1].split(\"PATIENT#:\", patient_name[1].count(\"PATIENT#:\"))\r\n patient_name = patient_name[0].split(\" \", patient_name[0].count(\" \"))\r\n first_name = patient_name[1]\r\n last_name = patient_name[-2]\r\n \r\n elif (data_value.startswith(CIGNA_ACU_DOS_TAG)) or (data_value.startswith(CIGNA_ACU_CHARGE_TAG)): #dos or charge\r\n data_value = str(eob_table.cell(i-1,0).value)\r\n dos = data_value[3:19]\r\n dos = dos.replace(\" \",\"\")\r\n dos_m = dos[:2]\r\n dos_d = dos[2:4]\r\n dos_y = dos[6:]\r\n\r\n data_value = str(eob_table.cell(i,0).value)\r\n charge = data_value.split(\".\", data_value.count(\".\"))\r\n charge = charge[0]+\".\"+charge[1][0:4]\r\n charge = charge[6:].replace(\",\",\"\") #cool method!\r\n charge = charge.replace(\" \",\"\")\r\n charge = float(charge)\r\n \r\n elif data_value.startswith(CIGNA_ACU_PAID_TAG): #paid\r\n paid = data_value.split(\" \", data_value.count(\" \"))[2]\r\n #begin to write into patients' claims\r\n searching( income_path, last_name, first_name, charge, dos_d, dos_m, dos_y, paid)\r\n #end to write into patients' claims\r\n else:\r\n continue\r\n return\r\ndef parsing_uhc_acu(income_path, eob_path):\r\n first_name = \"\"\r\n last_name = \"\"\r\n charge = 0\r\n dos_d = \"\"\r\n dos_m = \"\"\r\n dos_y = \"\"\r\n paid = 0\r\n eob_data = xlrd.open_workbook(eob_path)\r\n eob_table = eob_data.sheets()[0]\r\n for i in 
range(eob_table.nrows):\r\n data_value = str(eob_table.cell(i,0).value)\r\n \r\n if (data_value.startswith(UHC_ACU_NAME_TAG)) or (data_value.startswith(UHC_ACU_DOS_TAG)): #name or dos\r\n\r\n patient_name = data_value.split(\":\", data_value.count(\":\"))\r\n patient_name = patient_name[1].split(\"(\", patient_name[1].count(\"(\"))\r\n patient_name = patient_name[0].split(\" \", patient_name[0].count(\" \"))\r\n first_name = patient_name[1]\r\n last_name = patient_name[-2]\r\n \r\n\r\n dos_value = str(eob_table.cell(i+5,0).value)\r\n dos = dos_value.replace(\" \",\"\")\r\n dos = dos.split(\"-\")[0]\r\n dos_m = dos[:2]\r\n dos_d = dos[3:5]\r\n dos_y = dos[6:]\r\n \r\n elif (re.search(UHC_ACU_CHARGE_TAG, data_value)) or (data_value.startswith(UHC_ACU_PAID_TAG)): #charge or paid\r\n charge = data_value.split(UHC_ACU_CHARGE_TAG, data_value.count(UHC_ACU_CHARGE_TAG))\r\n charge = charge[1].split(\" \", charge[1].count(\" \"))\r\n charge = charge[1][1:]\r\n charge = float(charge.replace(\",\",\"\"))\r\n \r\n \r\n paid = data_value.split(UHC_ACU_CHARGE_TAG, data_value.count(UHC_ACU_CHARGE_TAG))\r\n paid = paid[1].split(\" \", paid[1].count(\" \"))\r\n #paid = paid[3][1:]\r\n #paid = float(paid.replace(\",\",\"\"))\r\n if (paid[-1].startswith(\"$\")):\r\n paid = paid[-1]\r\n else:\r\n paid = paid[-2]\r\n \r\n #begin to write into patients' claims\r\n searching( income_path, last_name, first_name, charge, dos_d, dos_m, dos_y, paid)\r\n #end to write into patients' claims\r\n else:\r\n continue\r\n return\r\n\r\n\r\n\r\n#parsing_aetna_acu(INCOME_PATH_TEST, AETNA_ACU_PATH)\r\nparsing_aetna_pt(INCOME_PATH_TEST, AETNA_PT_PATH)\r\n#parsing_cigna_acu(INCOME_PATH_TEST, CIGNA_ACU_PATH)\r\n#parsing_uhc_acu(INCOME_PATH_TEST, UHC_ACU_PATH)\r\n \r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.5505541563034058, "alphanum_fraction": 0.5679710507392883, "avg_line_length": 33.65322494506836, "blob_id": "690bcbb2987627fa789e209f4b67ea3a8aa2e875", "content_id": "4151fc090087fb8b5229e8e00dc65238722e6199", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4421, "license_type": "no_license", "max_line_length": 131, "num_lines": 124, "path": "/report_gather.py", "repo_name": "gedamia/clinic-income", "src_encoding": "UTF-8", "text": "import xlrd\r\nimport openpyxl\r\nimport sys\r\nimport os\r\nimport re\r\nimport time\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.reader.excel import load_workbook\r\nfrom openpyxl.workbook import Workbook\r\nfrom openpyxl.styles import Color, Fill\r\nfrom openpyxl.cell import Cell\r\nimport uuid\r\n\r\nimport logging\r\nimport logging.handlers \r\n'''logging.basicConfig(level=logging.DEBUG,\r\n format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\r\n datefmt='%a, %d %b %Y %H:%M:%S',\r\n filename='myapp.log',\r\n filemode='w')'''\r\n\r\nfrom openpyxl.styles import numbers, is_date_format\r\n\r\nACCOUNTING_PATH = r\"C:\\Evelyn\\accounting\"\r\nREPORT_PATH = r\"C:\\Evelyn\\report\\2018 report\"\r\nLOG_FILE = 'claims_log.txt'\r\nREPORT_KIND_ACU = \"ACU\"\r\nREPORT_KIND_CHIRO = \"CHIRO\"\r\nREPORT_KIND_PT = \"PT\"\r\nREPORT_KIND_ALL= \"ALL\"\r\n\r\ndef data_collection(data_path, begin_date, end_date, report_ws_acu, report_ws_chiro, report_ws_pt,report_kind = REPORT_KIND_ALL,): \r\n for filename in os.listdir(data_path):\r\n fp = os.path.join(data_path, filename)\r\n if os.path.isfile(fp):\r\n data = xlrd.open_workbook(fp)\r\n data_table = data.sheets()[0]\r\n\r\n for i in range(data_table.nrows):\r\n if 
data_table.ncols < 10:\r\n continue\r\n record_date = data_table.cell(i, 9).value\r\n record_bill_amount = data_table.cell(i, 6).value\r\n if record_date != \"\" and isinstance(record_date, float):\r\n [r_year,r_month,r_day,r_hour,r_minitue,r_second] = xlrd.xldate_as_tuple(record_date, 0)\r\n if (date_compare(r_year, r_month, r_day, begin_date[0:4], begin_date[4:6], begin_date[6:]) and\r\n date_compare(end_date[0:4],end_date[4:6],end_date[6:], r_year, r_month, r_day) ):\r\n report_ws_acu.append(data_table.row_values(i))\r\n report_ws_chiro.append(data_table.row_values(i))\r\n report_ws_pt.append(data_table.row_values(i))\r\n else:\r\n continue\r\n else:\r\n continue\r\n else:\r\n data_collection(fp, begin_date, end_date, report_ws_acu, report_ws_chiro, report_ws_pt, report_kind)\r\n\r\ndef date_compare(year0,mon0,day0, year1,mon1,day1):\r\n year0 = int(year0)\r\n mon0 = int(mon0)\r\n day0 = int(day0)\r\n year1 = int(year1)\r\n mon1 = int(mon1)\r\n day1 = int(day1)\r\n \r\n \r\n if year0 > year1: \r\n return True\r\n elif year0 == year1 :\r\n if mon0 > mon1:\r\n return True\r\n elif mon0 == mon1:\r\n if day0 >= day1:\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False\r\n else :\r\n return False\r\n \r\n#begin_date & end_date format [yyyymmdd]\r\ndef doing_financial_report(begin_date, end_date, report_kind = REPORT_KIND_ALL):\r\n acu_report_name = REPORT_PATH+\"\\\\\"+begin_date+\"-\"+end_date+\"-\"+REPORT_KIND_ACU+\".xlsx\"\r\n chiro_report_name = REPORT_PATH+\"\\\\\"+begin_date+\"-\"+end_date+\"-\"+REPORT_KIND_CHIRO+\".xlsx\"\r\n pt_report_name = REPORT_PATH+\"\\\\\"+begin_date+\"-\"+end_date+\"-\"+REPORT_KIND_PT+\".xlsx\"\r\n if os.path.exists(acu_report_name):\r\n os.remove(acu_report_name)\r\n if os.path.exists(chiro_report_name):\r\n os.remove(chiro_report_name)\r\n if os.path.exists(pt_report_name):\r\n os.remove(pt_report_name)\r\n #acu\r\n acu_wb = openpyxl.Workbook()\r\n acu_ws = acu_wb.active\r\n #chiro\r\n chiro_wb = openpyxl.Workbook()\r\n chiro_ws = chiro_wb.active\r\n #pt\r\n pt_wb = openpyxl.Workbook()\r\n pt_ws = pt_wb.active\r\n \r\n data_collection(ACCOUNTING_PATH, begin_date, end_date, acu_ws, chiro_ws, pt_ws, report_kind)\r\n\r\n if REPORT_KIND_ACU == report_kind:\r\n #acu\r\n acu_wb.save(acu_report_name)\r\n elif REPORT_KIND_CHIRO == report_kind:\r\n #chiro\r\n chiro_wb.save(chiro_report_name)\r\n elif REPORT_KIND_PT == report_kind:\r\n #pt\r\n pt_wb.save(pt_report_name)\r\n else:\r\n #acu\r\n acu_wb.save(acu_report_name)\r\n #chiro\r\n chiro_wb.save(chiro_report_name)\r\n #pt\r\n pt_wb.save(pt_report_name)\r\n \r\n \r\nif '005056c00008' == uuid.UUID(int = uuid.getnode()).hex[-12:] :\r\n doing_financial_report(\"20180224\",\"20180226\")\r\n" }, { "alpha_fraction": 0.807692289352417, "alphanum_fraction": 0.807692289352417, "avg_line_length": 25, "blob_id": "f98a148d1cd7e2da375f46c0023e45ed2b603aa0", "content_id": "3e3c06172144af8106d9f5816350b259fb82b783", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 52, "license_type": "no_license", "max_line_length": 35, "num_lines": 2, "path": "/README.md", "repo_name": "gedamia/clinic-income", "src_encoding": "UTF-8", "text": "# clinic-income\nthe management of the clinic income\n" }, { "alpha_fraction": 0.6013838648796082, "alphanum_fraction": 0.6028881072998047, "avg_line_length": 31.57575798034668, "blob_id": "4efafa8175a45e19b2404e153386919cde284b40", "content_id": "0c8a7aa5e61a9f1e3f7c60e955eb4444f9f1c7cc", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 3324, "license_type": "no_license", "max_line_length": 120, "num_lines": 99, "path": "/claims.py", "repo_name": "gedamia/clinic-income", "src_encoding": "UTF-8", "text": "import xlrd\r\nimport openpyxl\r\nimport sys\r\nimport os\r\nimport re\r\nfrom openpyxl import load_workbook\r\n#import pdb\r\n#import ipdb\r\nfrom openpyxl.reader.excel import load_workbook\r\nfrom openpyxl.workbook import Workbook\r\nfrom openpyxl.styles import Color, Fill\r\nfrom openpyxl.cell import Cell\r\n\r\n\r\n\r\nimport logging\r\nimport logging.handlers \r\n'''logging.basicConfig(level=logging.DEBUG,\r\n format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\r\n datefmt='%a, %d %b %Y %H:%M:%S',\r\n filename='myapp.log',\r\n filemode='w')'''\r\n \r\n\r\n\r\nfrom openpyxl.styles import numbers, is_date_format\r\n\r\n\r\nCLAIMS_PATH = r\"C:\\Evelyn\\report\\owe money.xlsx\"\r\nACCOUNTING_PATH = r\"C:\\Evelyn\\accounting\"\r\nMONEY_PATH = ACCOUNTING_PATH+r\"\\money patient\"\r\nREGULAR_PATH = ACCOUNTING_PATH+r\"\\regular patient\"\r\n\r\n\r\nLOG_FILE = 'claims_log.txt'\r\n\r\n'''def search_file(path, last_name, first_name, birth):\r\n for filename in os.listdir(path):\r\n fp = os.path.join(path, filename)\r\n if not os.path.isfile(fp):\r\n #return search_file(fp, last_name, first_name, birth)\r\n search_file(fp, last_name, first_name, birth)\r\n else:\r\n if bool( re.search(last_name, filename, re.IGNORECASE) and re.search(first_name, filename, re.IGNORECASE) ):\r\n #pdb.set_trace()\r\n return fp\r\n else:\r\n continue\r\n return fp'''\r\n\r\ndef search_in_money_patient(last_name, first_name, birth):\r\n for filename in os.listdir(MONEY_PATH):\r\n fp = os.path.join(MONEY_PATH, filename)\r\n if os.path.isfile(fp):\r\n if bool( re.search(last_name, filename, re.IGNORECASE) and re.search(first_name, filename, re.IGNORECASE) ):\r\n return fp\r\n else:\r\n continue\r\n\r\ndef search_in_regular_patient(last_name, first_name, birth):\r\n for filename in os.listdir(REGULAR_PATH):\r\n fp = os.path.join(REGULAR_PATH, filename)\r\n if os.path.isfile(fp):\r\n if bool( re.search(last_name, filename, re.IGNORECASE) and re.search(first_name, filename, re.IGNORECASE) ):\r\n return fp\r\n else:\r\n continue\r\n\r\ndef append_claim(fp,v):\r\n wb = openpyxl.load_workbook(fp)\r\n name_list = wb.get_sheet_names()\r\n my_sheet = wb.get_sheet_by_name(name_list[0])\r\n my_sheet.append(v)\r\n wb.save(fp)\r\n\r\ndata = xlrd.open_workbook(CLAIMS_PATH)\r\n\r\nclaims_table = data.sheets()[0]\r\n\r\nfor i in range(claims_table.nrows):\r\n last_name = claims_table.cell(i,3).value\r\n first_name = claims_table.cell(i,4).value\r\n birth = claims_table.cell(i,5).value\r\n fp = search_in_regular_patient(last_name, first_name, birth)\r\n # in the regular patient\r\n if fp:\r\n append_claim(fp, claims_table.row_values(i))\r\n else:\r\n fp = search_in_money_patient(last_name, first_name, birth)\r\n #in the money patient\r\n if fp:\r\n append_claim(fp, claims_table.row_values(i))\r\n else:\r\n #create new file in the accounting path\r\n new_wb = openpyxl.Workbook()\r\n new_ws = new_wb.active\r\n # append data\r\n new_ws.append(claims_table.row_values(i))\r\n new_wb.save(REGULAR_PATH+\"\\\\\"+last_name+\",\"+first_name+\".xlsx\")\r\n" } ]
5
daps0220/Decision-Maker-AI-
https://github.com/daps0220/Decision-Maker-AI-
6533b67c7493c301f3313b8667fd42b849bfb75d
43122ff82f680bc28e44937b3fdf2a736fb4de2d
80de1de784c1b5bca25333a9471c8c9864e99431
refs/heads/master
2021-01-10T12:45:36.128132
2016-02-04T05:59:35
2016-02-04T05:59:35
51,054,691
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6135634183883667, "alphanum_fraction": 0.6211878061294556, "avg_line_length": 33.55555725097656, "blob_id": "d7530e901bb09e2691b4899fb23035c9e7f709b8", "content_id": "437444cc9c053195259550be1f9c0141914469f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2492, "license_type": "no_license", "max_line_length": 115, "num_lines": 72, "path": "/Project Phase - 3/PP-3/agent_dpatel96.py", "repo_name": "daps0220/Decision-Maker-AI-", "src_encoding": "UTF-8", "text": "\nimport numpy as np\n#from simulate_agents_phase3 import simulate_agents # trying to import but gives error.\nfrom agents import Agent_single_sklearn, Agent\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.svm import SVC\nfrom sklearn import cross_validation \nfrom sklearn.metrics import accuracy_score,log_loss,precision_score\nfrom sklearn.calibration import calibration_curve\n\n\n#method from simulate_agents_phase3 just for REFERNCE.\ndef simulate_agents(agents, value, X, y, price_trials = 10):\n \n agent_wealths = {}\n \n for agent in agents:\n agent_wealths[agent] = 0\n \n num_products = X.shape[0]\n \n for p in range(num_products): \n \n # Excellent or not?\n excellent = (y[p] == 'Excellent')\n \n for agent in agents:\n prob = agent.predict_prob_of_excellent(X[p])\n # try a range of prices \n for pt in range(price_trials): \n price = ((2*pt+1)*value)/(2*price_trials) \n if agent.will_buy(value, price, prob):\n agent_wealths[agent] -= price\n if excellent:\n agent_wealths[agent] += value\n return agent_wealths\n\n#my Agent class\nclass Agent_dpatel96(Agent):\n\n\t\n\n\tdef choose_the_best_classifier(self, X_train, y_train, X_val, y_val):\n\t\t\n\t\t# array agents to calucate wealth on validation dataset. 
\n\t\tagents = []\n \n \tagents.append(Agent_single_sklearn(\"bnb\", BernoulliNB()))\n \n \t\tagents.append(Agent_single_sklearn(\"lr\", LogisticRegression()))\n \n \tagents.append(Agent_single_sklearn(\"svc\", SVC(kernel='poly', degree=4, probability=True, random_state=0)))\n\n\t\t#Train the agents\n\t \tfor agent in agents:\n \t\tagent.train(X_train, y_train, X_val, y_val)\n\n\t\t# Simulate the agents on test\n \tvalue = 1000 #fixed value given by professor.\n \tagent_wealths = simulate_agents(agents, value, X_val, y_val)\n\t\twealths = list(agent_wealths.values())\n\t\tclass_keys = list(agent_wealths.keys())\n\t\tagent_name=str(class_keys[wealths.index(max(wealths))]) \n\t\t\n\t\t#returned best choosen classifier.\n\t\tif(agent_name == \"Agent_bnb\"):\n\t\t\treturn BernoulliNB()\t\t\n\t\telif(agent_name == 'Agent_lr'):\n\t\t\treturn LogisticRegression()\t\n\t\telif(agent_name=='Agent_svc'):\n\t\t\treturn SVC(kernel='poly', degree=4, probability=True, random_state=0)\n\t\t\n" }, { "alpha_fraction": 0.747474730014801, "alphanum_fraction": 0.7676767706871033, "avg_line_length": 23.75, "blob_id": "240ffe3acd8160c109a1268be2978dcf3abc8ea5", "content_id": "bc90c9d0b2bc01860ec4ecd3509bc22108501c81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 99, "license_type": "no_license", "max_line_length": 37, "num_lines": 4, "path": "/Project Phase -1/phase1-code/agent_dpatel96.py", "repo_name": "daps0220/Decision-Maker-AI-", "src_encoding": "UTF-8", "text": "from agents import Agent\nclass Agent_dpatel96(Agent):\n\tdef will_buy(self,value,price,prob):\n\t\tpass\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 20, "blob_id": "a156daab2d82e5b257da7816e9d958d7a8c4a929", "content_id": "ae70d8b4d61b45951cae0a6f67069082a0997bc6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 20, "num_lines": 1, "path": "/README.md", "repo_name": "daps0220/Decision-Maker-AI-", "src_encoding": "UTF-8", "text": "# Decision-Maker-AI-" }, { "alpha_fraction": 0.5100578665733337, "alphanum_fraction": 0.5194268226623535, "avg_line_length": 35.220001220703125, "blob_id": "a3bdae44390353601c23eb916004289c4cbfdee6", "content_id": "43487c31e35e33b70d6033a2d6c77a421028cb03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3629, "license_type": "no_license", "max_line_length": 112, "num_lines": 100, "path": "/Project Phase - 3/agent_dpatel96.py", "repo_name": "daps0220/Decision-Maker-AI-", "src_encoding": "UTF-8", "text": "\nimport numpy as np\n#from simulate_agents_phase3 import simulate_agents # trying to import but gives error.\nfrom agents import Agent_single_sklearn, Agent\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.svm import SVC\nfrom sklearn import cross_validation \nfrom sklearn.metrics import accuracy_score,log_loss,precision_score\nfrom sklearn.calibration import calibration_curve\n\n\n#method from simulate_agents_phase3 just for REFERNCE.\ndef simulate_agents(agents, value, X, y, price_trials = 10):\n \n agent_wealths = {}\n \n for agent in agents:\n agent_wealths[agent] = 0\n \n num_products = X.shape[0]\n \n for p in range(num_products): \n \n # Excellent or not?\n excellent = (y[p] == 'Excellent')\n \n for agent in agents:\n prob = agent.predict_prob_of_excellent(X[p])\n # try a range of prices \n 
for pt in range(price_trials): \n price = ((2*pt+1)*value)/(2*price_trials) \n if agent.will_buy(value, price, prob):\n agent_wealths[agent] -= price\n if excellent:\n agent_wealths[agent] += value\n return agent_wealths\n\n#my Agent class\nclass Agent_dpatel96(Agent):\n\n\tdef inaccurateSum(self,predicted,actual,predicted_proba):\n inaccurate_sum = 0\n\n totalMisPredictions = 0.0\n sumOfProbsOfMisPredictions = 0.0\n \n for i in range(0,len(predicted)):\n totalMisPredictions = totalMisPredictions + 1\n if (actual[i] == 'Trash' and predicted[i] == 'Excellent'):\n sumOfProbsOfMisPredictions = sumOfProbsOfMisPredictions + predicted_proba[i][0]\n elif (actual[i] == 'Excellent' and predicted[i] == 'Trash'):\n sumOfProbsOfMisPredictions = sumOfProbsOfMisPredictions + predicted_proba[i][1]\n\n return sumOfProbsOfMisPredictions\n \n \n def choose_the_best_classifier(self, X_train, y_train, X_val, y_val):\n clf = []\n\n bern_clf = BernoulliNB()\n bern_clf.fit(X_train, y_train)\n \n \n logi_clf = LogisticRegression()\n logi_clf.fit(X_train, y_train)\n \n svc_clf = SVC(degree=4,probability=True,random_state=0)\n svc_clf.fit(X_train, y_train)\n \n clf.append(bern_clf)\n clf.append(logi_clf)\n clf.append(svc_clf)\n\n bst_clf_prb = 500\n inaccurate_sum = 500\n bestClassifier = svc_clf\n\n x = Agent_dpatel96(\"dpatel96\")\n\n\n for classifer in clf:\n \n X = classifer.predict(X_val)\n Xprob = classifer.predict_proba(X_val) \n inaccurate_sum = x.inaccurateSum(X,y_val,Xprob)\n \n if (inaccurate_sum) < bst_clf_prb: \n bst_clf_prb = inaccurate_sum\n bestClassifier = classifer\n\n best = None\n if bestClassifier == bern_clf :\n best = BernoulliNB()\n elif bestClassifier == logi_clf :\n best = LogisticRegression()\n else :\n best = SVC(degree=4,probability=True,random_state=0)\n\n return best\n\t \n" }, { "alpha_fraction": 0.5683296918869019, "alphanum_fraction": 0.5907447338104248, "avg_line_length": 29.065217971801758, "blob_id": "b30a8c550b3de5eca331bbbadbc8d3c38ff2d9ec", "content_id": "1c47fa714bfa7d30995f958ad2ee734f0ff73d03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1383, "license_type": "no_license", "max_line_length": 106, "num_lines": 46, "path": "/Project Phase - 2/agent_dpatel96.py", "repo_name": "daps0220/Decision-Maker-AI-", "src_encoding": "UTF-8", "text": "from agents import Agent\nimport numpy as np\nclass Agent_dpatel96(Agent):\n\t\n\tdef train(self,X,y):\n\t\tcount_Excellent = (y=='Excellent').sum() \n\t\tglobal prob_Excellent\n\t\tprob_Excellent = count_Excellent/float(y.shape[0])\n\t\tglobal prob_Trash\n \t\tprob_Trash = 1 - prob_Excellent\n\t\tcount_Trash = y.shape[0] - count_Excellent\n\t\tglobal count_E\n\t\tcount_E = np.zeros((X.shape[1],2),dtype = np.int) # All Features are binary \n\t\tglobal prob_E\n\t\tprob_E = np.zeros((X.shape[1],2),dtype = np.float)\n\t\tfor j in range(X.shape[1]):\n\t\t\tfor i in range(X.shape[0]):\n\t\t\t\tif(X[i][j] == 1 and y[i] == 'Excellent'):\n\t\t\t\t\tcount_E[j,0] = count_E[j,0] + 1\n\t\t\t\telif(X[i][j]==1 and y[i] == 'Trash'):\n\t\t\t\t\tcount_E[j,1] = count_E[j,1] +1\n\t\t\n\t\tfor j in range(prob_E.shape[0]):\n\t\t\tprob_E[j,0] = count_E[j,0]/float(count_Excellent)\n\t\t\tprob_E[j,1] = count_E[j,1]/float(count_Trash)\n\n\tdef predict_prob_of_excellent(self, x):\n\t\t\n\t\tprob_X_E = np.zeros(len(x))\n\t\tprob_X_T = np.zeros(len(x))\n\t\t\t\n\t\tfor i in range(len(x)):\n\t\t\tif(x[i] == 1):\n\t\t\t\tprob_X_E[i] = prob_E[i,0]\t\t\t\n\t\t\t\tprob_X_T[i] = 
prob_E[i,1]\n\t\t\telse:\n\t\t\t\tprob_X_E[i] = 1\t\t\t\n\t\t\t\tprob_X_T[i] = 1\n\n\t\tprob_X_all_E = 1\n\t\tprob_X_all_T = 1\n\t\tfor i in range(len(prob_X_E)):\n\t\t\tprob_X_all_E = prob_X_all_E * prob_X_E[i]\n\t\t\tprob_X_all_T = prob_X_all_T * prob_X_T[i]\n\n\t\treturn (prob_X_all_E * prob_Excellent) / ((prob_X_all_E * prob_Excellent) + (prob_X_all_T * prob_Trash))\n" } ]
5
SSupattapone/Birthday-quiz
https://github.com/SSupattapone/Birthday-quiz
4ebf15d45388628804af71874050530add4c3875
03e1c9abffc2463f9520d86c927c81e6d2b20741
451efa6450335d0d58e0466f0f2e59f1615a370e
refs/heads/master
2021-01-18T14:21:59.775061
2015-10-13T15:12:41
2015-10-13T15:12:41
43,016,120
0
0
null
2015-09-23T17:27:33
2015-09-15T17:48:49
2015-09-22T18:59:25
null
[ { "alpha_fraction": 0.6182407140731812, "alphanum_fraction": 0.6516321897506714, "avg_line_length": 46.78571319580078, "blob_id": "653e70a111028f942689b3316219e20d42673b8b", "content_id": "3e218eb7a1f78bde30a1af4124afbd75d4db62ba", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4013, "license_type": "permissive", "max_line_length": 103, "num_lines": 84, "path": "/birthday.py", "repo_name": "SSupattapone/Birthday-quiz", "src_encoding": "UTF-8", "text": "\"\"\"\nbirthday.py\nAuthor: Sam Supattapone\nCredit: none\nAssignment: Birthday\n\nYour program will ask the user the following questions, in this order:\n\n1. Their name.\n2. The name of the month they were born in (e.g. \"September\").\n3. The year they were born in (e.g. \"1962\").\n4. The day they were born on (e.g. \"11\").\n\nIf the user's birthday fell on October 31, then respond with:\n\n You were born on Halloween!\n\nIf the user's birthday fell on today's date, then respond with:\n\n Happy birthday!\n\nOtherwise respond with a statement like this:\n\n Peter, you are a winter baby of the nineties.\n\nExample Session\n\n Hello, what is your name? Eric\n Hi Eric, what was the name of the month you were born in? September\n And what year were you born in, Eric? 1972\n And the day? 11\n Eric, you are a fall baby of the stone age.\n\"\"\"\nfrom datetime import datetime\nfrom calendar import month_name\ntodaymonth = datetime.today().month\ntodaydate = datetime.today().day\n\nname = input(\"Hello, what is your name? \")\nmonth = input(\"Hi {0}, what was the name of the month you were born in? \" .format(name))\nyear = input(\"And what year were you born in, {0}? \" .format(name))\nday = input(\"And the day? \")\n\nif month == \"October\" and int(day) == 31:\n print(\"You were born on Halloween!\")\n\nelif month == month_name[todaymonth] and int(day) == todaydate:\n print(\"Happy birthday!\")\n\nelif int(year) < 1980 and (month == \"December\" or month == \"January\" or month == \"February\"):\n print(\"{0}, you are a winter baby of the Stone Age.\" .format(name))\nelif int(year) < 1980 and (month == \"March\" or month == \"April\" or month == \"May\"):\n print(\"{0}, you are a spring baby of the Stone Age.\" .format(name))\nelif int(year) < 1980 and (month == \"June\" or month == \"July\" or month == \"August\"):\n print(\"{0}, you are a summer baby of the Stone Age.\" .format(name))\nelif int(year) < 1980 and (month == \"September\" or month == \"October\" or month == \"November\"):\n print(\"{0}, you are a fall baby of the Stone Age.\" .format(name))\n\nelif 1980 <= int(year) <= 1989 and (month == \"December\" or month == \"January\" or month == \"February\"):\n print(\"{0}, you are a winter baby of the eighties.\" .format(name))\nelif 1980 <= int(year) <= 1989 and (month == \"March\" or month == \"April\" or month == \"May\"):\n print(\"{0}, you are a spring baby of the eighties.\" .format(name))\nelif 1980 <= int(year) <= 1989 and (month == \"June\" or month == \"July\" or month == \"August\"):\n print(\"{0}, you are a summer baby of the eighties.\" .format(name))\nelif 1980 <= int(year) <= 1989 and (month == \"September\" or month == \"October\" or month == \"November\"):\n print(\"{0}, you are a fall baby of the eighties.\" .format(name))\n\nelif 1990 <= int(year) <= 1999 and (month == \"December\" or month == \"January\" or month == \"February\"):\n print(\"{0}, you are a winter baby of the nineties.\" .format(name))\nelif 1990 <= int(year) <= 1999 and (month == 
\"March\" or month == \"April\" or month == \"May\"):\n print(\"{0}, you are a spring baby of the nineties.\" .format(name))\nelif 1990 <= int(year) <= 1999 and (month == \"June\" or month == \"July\" or month == \"August\"):\n print(\"{0}, you are a summer baby of the nineties.\" .format(name))\nelif 1990 <= int(year) <= 1999 and (month == \"September\" or month == \"October\" or month == \"November\"):\n print(\"{0}, you are a fall baby of the nineties.\" .format(name))\n\nelif 2000 <= int(year) and (month == \"December\" or month == \"January\" or month == \"February\"):\n print(\"{0}, you are a winter baby of the two thousands.\" .format(name))\nelif 2000 <= int(year) and (month == \"March\" or month == \"April\" or month == \"May\"):\n print(\"{0}, you are a spring baby of the two thousands.\" .format(name))\nelif 2000 <= int(year) and (month == \"June\" or month == \"July\" or month == \"August\"):\n print(\"{0}, you are a summer baby of the two thousands.\" .format(name))\nelif 2000 <= int(year) and (month == \"September\" or month == \"October\" or month == \"November\"):\n print(\"{0}, you are a fall baby of the two thousands.\" .format(name))" } ]
1
DElr51/EasyQuote
https://github.com/DElr51/EasyQuote
dd21f26bcb65f2e519cf6008c70cfbd558c8e022
988271b62a7cfc4931ce25670115a617dffdb833
d4325f316c4c95a0394416d1219e5a3862b3aad8
refs/heads/master
2020-12-20T11:59:39.248888
2020-04-01T17:06:02
2020-04-01T17:06:02
236,068,665
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4589992165565491, "alphanum_fraction": 0.46947863698005676, "avg_line_length": 51.04166793823242, "blob_id": "b4c8fdfc9e727ac0a73648e1854192784af7ae33", "content_id": "91cf4a8dfd72e7b25dc8b122b3027d3c74001daa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4377, "license_type": "no_license", "max_line_length": 182, "num_lines": 72, "path": "/mainGUImw.py", "repo_name": "DElr51/EasyQuote", "src_encoding": "UTF-8", "text": "import PySimpleGUI as sg \r\nimport sqlite3 as db\r\n\r\n\r\nsg.theme('DarkBlue2') #тема оформления, меняется по желанию\r\nconnection = db.connect('mainGUIdb.db') #подключаю дб\r\ncursor = connection.cursor()\r\n\r\ncursor.execute('CREATE TABLE IF NOT EXISTS quotes (id INTEGER PRIMARY KEY, author TEXT, text TEXT)')\r\n\r\n# верхняя панель меню\r\nmenu_def = [['Файл', ['Открыть', 'Сохранить', 'Выйти!', 'Свойства']], \r\n ['Изменить', ['Вставить', ['Конвертированно', 'Без изменений', 'не туда, сенпай!'], 'Отмена'], ]]\r\n\r\nlayout = [[sg.Menu(menu_def, tearoff=True)], # оформление окна номер 1 для внесения данных\r\n [sg.Text('Пополнение цитатника:', justification='center', font=('Segoe UI', 12))],\r\n [sg.Text('Автор:', size=(6,1), font=('Segoe UI', 12), justification=\"center\")], [sg.Input(key=\"_INPUTAUTHOR_\", justification=\"center\", size=(40,1), font=('Segoe UI', 10))],\r\n [sg.Text('Текст:', font=('Segoe UI', 12))],\r\n [sg.Multiline(key='_INPUTTEXT_', size=(40,15))], \r\n [sg.Button('Записать!')], [sg.Text(key='__INPUTSUCCESS__', size=(8,1), text_color='green')],\r\n [sg.Button(button_text=\"Получить все записи\")],\r\n [sg.Multiline(size=(40,20), key='_OUTPUTTOUSER_', font=('Segoe UI', 10), default_text='Для просмотра записей, нажмите соответствующую кнопку.')], \r\n \r\n [sg.Button('Выйти', font=('Segoe UI', 14))]]\r\n \r\n\r\n# Function to convert list's values to string\r\ndef listToString(s): \r\n str1 = \"\" \r\n for ele in s: \r\n str1 += ele \r\n return str1 \r\n \r\nrazdelitel = \"--------------------------------------------------\"\r\nsuccess = 'Успешно!'\r\n\r\nwin = sg.Window('Цитатник', layout) #обозначаем что такое окно 1 \r\nwhile True:\r\n ev, vals = win.Read(timeout=100) #считываем данные из окна через такой-то промежуток\r\n if ev is None: #если закрываем, оно, собсна, закрывается \r\n break\r\n\r\n author = vals['_INPUTAUTHOR_'] # Ввод с поля Автора\r\n text = vals['_INPUTTEXT_'] # Ввод с поля Текста\r\n\r\n if ev == 'Записать!': #Метод записи\r\n cursor.execute('INSERT INTO quotes(author, text) VALUES (?, ?)', (str(author), str(text ))) #Забираем из вышеобъявленных контейнеров данные\r\n connection.commit() # ... и вносим их в базу данных\r\n win['__INPUTSUCCESS__'].update(success) #небольшая проверочка \r\n \r\n if ev == 'Получить все записи': \r\n valuesFromDB = []\r\n with connection: \r\n cursor.execute('SELECT * FROM quotes') \r\n while True: \r\n row = cursor.fetchone() \r\n if row == None:\r\n break\r\n \r\n valuesFromDB.extend('ID:')\r\n valuesFromDB.extend(str(row[0]))\r\n valuesFromDB.extend('\\n')\r\n valuesFromDB.extend('Автор:')\r\n valuesFromDB.extend(str(row[1]))\r\n valuesFromDB.extend('\\n')\r\n \r\n valuesFromDB.extend(str(row[2]))\r\n valuesFromDB.extend('\\n')\r\n valuesFromDB.extend(razdelitel)\r\n valuesFromDB.extend('\\n')\r\n \r\n win['_OUTPUTTOUSER_'].update(listToString(valuesFromDB))" } ]
1
Leeaandrob/sentiment-analysis
https://github.com/Leeaandrob/sentiment-analysis
c093abd41f5d184b2ce7757975d5cc57e3c9ab89
365cec774e30c563237a0e6f9476e32695aad979
acc8ba62902609997c753a127c643d3cc2473550
refs/heads/master
2020-07-03T19:53:21.488778
2017-01-30T01:11:58
2017-01-30T01:11:58
74,233,315
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.691082775592804, "alphanum_fraction": 0.691082775592804, "avg_line_length": 27.545454025268555, "blob_id": "9cf2c3cf078ebfcf2a756ce0867de36008df44c3", "content_id": "640f0d22d6b5db000303b1a0dfa467f05cb69614", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 314, "license_type": "no_license", "max_line_length": 55, "num_lines": 11, "path": "/sentiment_analyst/urls.py", "repo_name": "Leeaandrob/sentiment-analysis", "src_encoding": "UTF-8", "text": "from django.conf.urls import include, url\nfrom django.contrib import admin\n\nfrom core import urls as core_urls\nfrom core.views import HomeView\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', HomeView.as_view(), name='home'),\n url(r'^api/', include(core_urls, namespace='api')),\n]\n" }, { "alpha_fraction": 0.7229437232017517, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 16.769229888916016, "blob_id": "c779c5a9e149c1743737acea0331a872200709a4", "content_id": "f44e6fd04df397d26054c8414415845d7719ca09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 231, "license_type": "no_license", "max_line_length": 60, "num_lines": 13, "path": "/core/urls.py", "repo_name": "Leeaandrob/sentiment-analysis", "src_encoding": "UTF-8", "text": "# coding: utf-8\nfrom django.conf.urls import url\n\nfrom rest_framework import routers\n\nfrom core.apis import HomeAPI\n\n\nrouter = routers.DefaultRouter()\n\nurlpatterns = [\n url(r'^sentiment/', HomeAPI.as_view(), name='sentiment')\n]\n" }, { "alpha_fraction": 0.6427915692329407, "alphanum_fraction": 0.6510559916496277, "avg_line_length": 24.325580596923828, "blob_id": "151aa297637f1a9fe7ff563343885cb6064eb287", "content_id": "c35ad5890fc172ebc0ff724305c4dd1b283b34d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2178, "license_type": "no_license", "max_line_length": 74, "num_lines": 86, "path": "/tmp/sentiment_analysis.py", "repo_name": "Leeaandrob/sentiment-analysis", "src_encoding": "UTF-8", "text": "import nltk\nfrom twython import Twython\nfrom textblob import TextBlob\nfrom textblob.sentiments import NaiveBayesAnalyzer\n\n\ndef return_trainer():\n data = open('training.txt')\n data = data.readlines()\n return [d.split('\\t') for d in data]\n\n\ndef bagOfWords(tweets):\n wordsList = []\n for (words, sentiment) in tweets:\n wordsList.extend(words)\n return wordsList\n\n\ndef wordFeatures(wordList):\n wordList = nltk.FreqDist(wordList)\n wordFeatures = wordList.keys()\n return wordFeatures\n\n\ndef getFeatures(doc):\n docWords = set(doc)\n feat = {}\n for word in wordFeatures:\n feat['contains(%s)' % word] = (word in docWords)\n return feat\n\n\ntweets = [(d[1], 'positive') if d[0] == '1' else (d[1], 'negative')\n for d in return_trainer()]\n\npositiveTweets = [data for data in tweets if data[1] == 'positive']\nnegativeTweets = [data for data in tweets if data[1] == 'negative']\n\n\ntweets = []\nfor (words, sentiment) in positiveTweets + negativeTweets:\n words_filtered = [e.lower() for e in nltk.word_tokenize(words) if len(\n e) >= 3]\n tweets.append((words_filtered, sentiment))\n\nfor t in tweets:\n print(t)\n\nwordFeatures = wordFeatures(bagOfWords(tweets))\n\n\ndata_trainer = return_trainer()\n\ntraining_set = nltk.classify.apply_features(getFeatures, tweets)\n\nclassifier = 
nltk.NaiveBayesClassifier.train(training_set)\n\nprint(classifier.show_most_informative_features(32))\n\n\nConsumerKey = \"6TO19L8LlJouqnztJ6hZkCgsA\"\nConsumerSecret = \"gK5dcQFlgS1lLTMPdhsqh046v48VfGYIpWqENwEhwLaFEpX0Pv\"\nAccessToken = \"257611954-EBfaOkObL04YTCB2NEC39C5GzhyTIGMGcF1TNXul\"\nAccessTokenSecret = \"YrvPzGj97TsLR9XRUp5ESbP3KxlMfezHVnmex5RNZb3y6\"\n\n\ntwitter = Twython(ConsumerKey,\n ConsumerSecret,\n AccessToken,\n AccessTokenSecret)\n\nqueryText = \"#VemPraRuaBrasil\"\nresult = twitter.search(q=queryText)\n\n\nfor status in result[\"statuses\"]:\n print(\"Tweet: {0} \\n Sentiment: {1} \\n\".format(\n status[\"text\"], classifier.classify(getFeatures(\n status[\"text\"].split()))))\n\n\nfor status in result[\"statuses\"]:\n blob = TextBlob(status['text'], analyzer=NaiveBayesAnalyzer())\n print(\"Tweet: {0} \\n Sentiment: {1} \\n\".format(status['text'],\n blob.sentiment))\n" }, { "alpha_fraction": 0.5137255191802979, "alphanum_fraction": 0.7019608020782471, "avg_line_length": 16, "blob_id": "b86244f9ee505f91c37f5ec4351c635f97472b25", "content_id": "6c9700b103b02761040470c53ec90937cff40a23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 255, "license_type": "no_license", "max_line_length": 26, "num_lines": 15, "path": "/requirements.txt", "repo_name": "Leeaandrob/sentiment-analysis", "src_encoding": "UTF-8", "text": "dj-database-url==0.4.0\nDjango==1.10.3\ngunicorn==19.4.5\npsycopg2==2.6.1\nwhitenoise==2.0.6\npython-decouple==3.0\nunipath==1.1\ndjango-sslify==0.2.7\nraven==5.20.0\ndjangorestframework==3.5.3\nrequests==2.9.1\npandas==0.19.2\nsklearn==0.0\nscipy==0.18.1\nnltk==3.2.1\n" }, { "alpha_fraction": 0.6563106775283813, "alphanum_fraction": 0.6640776991844177, "avg_line_length": 27.61111068725586, "blob_id": "cf02dfc5c78c2845a937f54d4b354168ed0d58a5", "content_id": "59e3497ec1048038b5106d95de42f249132c6125", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 515, "license_type": "no_license", "max_line_length": 57, "num_lines": 18, "path": "/core/tests/test_api.py", "repo_name": "Leeaandrob/sentiment-analysis", "src_encoding": "UTF-8", "text": "# coding: utf-8\nfrom unittest.mock import patch\n\nfrom django.urls import reverse\n\nfrom rest_framework.test import APITestCase, APIClient\n\n\nclass CommentsApiView(APITestCase):\n @patch('core.apis.HomeAPI.get_all_comments')\n def _get_comments_from_query_string(self, _comments):\n client = APIClient()\n\n response = client.get(reverse('api:comments'),\n {'page_name': 'nytimes'})\n\n self.assertEqual(response.status_code, 200)\n _comments.assert_called_with()\n" }, { "alpha_fraction": 0.6416184902191162, "alphanum_fraction": 0.650288999080658, "avg_line_length": 24.629629135131836, "blob_id": "ae79816f10e9648a86be06620df8e268aa5c4c01", "content_id": "b9e7d7409f081e66a63aad3e9566834bbcf21e63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 692, "license_type": "no_license", "max_line_length": 74, "num_lines": 27, "path": "/core/apis.py", "repo_name": "Leeaandrob/sentiment-analysis", "src_encoding": "UTF-8", "text": "# coding: utf-8\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom sklearn.externals import joblib\n\n\ndef predict(text, fit_file='sentiment_analyst/static//bag_words_SVC.csv'):\n pipe = joblib.load(fit_file)\n words = text\n return 
pipe.predict([words])\n\n\nclass HomeAPI(APIView):\n def get(self, request):\n text = request.GET.get('text')\n response = predict(text)\n\n if response.tolist()[0] == '1':\n response = 'Positive'\n else:\n response = 'Negative'\n\n result = {'sentiment': response, 'text': text}\n\n return Response(result, status.HTTP_200_OK)\n" }, { "alpha_fraction": 0.696107804775238, "alphanum_fraction": 0.7020958065986633, "avg_line_length": 30.809524536132812, "blob_id": "d556e9ce35d5e8400ce6ae3a2e1f29cc735daa20", "content_id": "cb77d36ff7c54ada4b741f68b9b96dd6b78d8e49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 668, "license_type": "no_license", "max_line_length": 60, "num_lines": 21, "path": "/core/tests/test_views.py", "repo_name": "Leeaandrob/sentiment-analysis", "src_encoding": "UTF-8", "text": "# coding: utf-8\nfrom unittest.mock import patch\n\nfrom django.test import TestCase\nfrom django.urls import reverse\n\n\nclass HomeViewTest(TestCase):\n @patch('core.views.HomeView.get_all_comments')\n def test_correct_response(self, _comments):\n response = self.client.get(reverse('home'))\n\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'core/index.html')\n\n @patch('core.views.HomeView.get_all_comments')\n def test_comments_context_in_html(self, _comments):\n response = self.client.get(reverse('home'))\n\n self.assertIn('comments', response.context)\n _comments.assert_called_once_with()\n" }, { "alpha_fraction": 0.6131198406219482, "alphanum_fraction": 0.6182851195335388, "avg_line_length": 29.73015785217285, "blob_id": "a98a6b0f1af84e99a34a53f4f513c72114ea4c6d", "content_id": "c6784235c100e9049f9644b84c2c3d187df745c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1936, "license_type": "no_license", "max_line_length": 80, "num_lines": 63, "path": "/core/management/commands/activate.py", "repo_name": "Leeaandrob/sentiment-analysis", "src_encoding": "UTF-8", "text": "# coding: utf-8\nimport csv\nfrom nltk.corpus import stopwords\nfrom pandas import DataFrame\n\nimport sklearn\nfrom sklearn.externals import joblib\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.svm import SVC\n\nfrom django.core.management.base import BaseCommand\n\n\ndef pre_processor(text, cat):\n stops = set(stopwords.words(\"english\"))\n words = text.lower().split()\n words = ' '.join([w for w in words if w not in stops])\n return words, cat\n\n\ndef train(df, fit_file):\n df = df.dropna()\n train_size = 0.8\n X_train, X_test, Y_train, Y_test = sklearn.model_selection.train_test_split(\n df.text, df.sentiment, train_size=train_size\n )\n\n model = SVC()\n vect = CountVectorizer(analyzer=\"word\",\n tokenizer=None,\n preprocessor=None,\n stop_words=None)\n\n pipe = Pipeline([('vect', vect), ('svc', model)])\n pipe.fit(X_train, Y_train)\n pipe.fit(df.text, df.sentiment)\n joblib.dump(pipe, fit_file)\n\n\ndef predict(text, fit_file='sentiment_analyst/static//bag_words_SVC.csv'):\n pipe = joblib.load(fit_file)\n words = text\n return pipe.predict([words])\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n csvfile = open('sentiment_analyst/static/us_sentiment.csv', 'r')\n reader = csv.reader(csvfile)\n data = [row for row in reader]\n texts = [(text[1], text[3]) for text in data[2:]]\n new_data_text = [pre_processor(text[1], text[0])\n for text in texts]\n new_data_text = new_data_text[1:]\n 
df = DataFrame(new_data_text)\n df.columns = ['text', 'sentiment']\n\n df.to_csv('sentiment_analyst/static/bag_words_SVC.csv',\n sep=';', encoding='utf-8')\n\n print('Training...')\n train(df, 'sentiment_analyst/static/bag_words_SVC.csv')\n" }, { "alpha_fraction": 0.6367924809455872, "alphanum_fraction": 0.6396226286888123, "avg_line_length": 29.285715103149414, "blob_id": "2eb0e2b2bc0398893701a77bae09717d7c5f9160", "content_id": "f35a657086a9a18fd4ac29270a40bcff892de3bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1060, "license_type": "no_license", "max_line_length": 80, "num_lines": 35, "path": "/core/management/commands/trainer.py", "repo_name": "Leeaandrob/sentiment-analysis", "src_encoding": "UTF-8", "text": "# coding: utf-8\nimport pandas as pd\nimport sklearn\nfrom sklearn.externals import joblib\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.svm import SVC\n\nfrom django.core.management.base import BaseCommand\n\n\ndef train(df, fit_file):\n df = df.dropna()\n train_size = 0.8\n X_train, X_test, Y_train, Y_test = sklearn.model_selection.train_test_split(\n df.text, df.sentiment, train_size=train_size\n )\n\n model = SVC()\n vect = CountVectorizer(analyzer=\"word\",\n tokenizer=None,\n preprocessor=None,\n stop_words=None)\n\n pipe = Pipeline([('vect', vect), ('svc', model)])\n pipe.fit(X_train, Y_train)\n pipe.fit(df.text, df.sentiment)\n joblib.dump(pipe, fit_file)\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n df = pd.read_csv('sentiment_analyst/static/bag_words_SVC.csv')\n print('Training...')\n train(df, 'sentiment_analyst/static/bag_words_SVC.csv')\n" }, { "alpha_fraction": 0.7136752009391785, "alphanum_fraction": 0.7236467003822327, "avg_line_length": 23.20689582824707, "blob_id": "f63f28ccc26976bb636c0061b3c28d86f9855dc3", "content_id": "1bc9c0204a6900164f81020486dae70ec2779d76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 702, "license_type": "no_license", "max_line_length": 76, "num_lines": 29, "path": "/README.md", "repo_name": "Leeaandrob/sentiment-analysis", "src_encoding": "UTF-8", "text": "# Sentiment analysis using Django\n\nA project to get and analysis sentimento from facebook page posts\n\n## Features\n\n- GET Facebook page posts comments\n- View to display sentiment analysis\n\n## How to Use\n\n1. Create your working environment inside a dir with `$ python3 -m venv env`\n2. Activate Virtual Env with `$ source env/bin/activate`\n3. Install the requirements (`$ pip install -r dev-requirements.txt`)\n4. `$ Make migrations`\n5. `$ Make migrate`\n6. `$ Make run`\n\n## Deployment to Heroku\n\nFirst install heroku toolbelt to use this..\nAfter your finish the release follow this steps.\n\n $ heroku git:remote\n $ git add .\n $ git commit -m \"Initial commit\"\n\t$ git push heroku master\n\nAnd done :)\n" } ]
10
songhao8080/demo_git
https://github.com/songhao8080/demo_git
50adb59e872f76ca9851f55a807f31c7650308ab
f373421f13d8b5e82e5176479a8df464e2293547
8043bb84c3db1f7c8ed6e8bed7f92938c35849f7
refs/heads/master
2021-06-15T19:18:13.546388
2017-03-07T11:52:08
2017-03-07T11:52:08
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4742826223373413, "alphanum_fraction": 0.47915539145469666, "avg_line_length": 34.13461685180664, "blob_id": "6c337bbdd65998bf3078806869824f15f1e9153c", "content_id": "8057f102daf86034d11599089155cdf1cddfd88a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1847, "license_type": "no_license", "max_line_length": 107, "num_lines": 52, "path": "/yachang/spiders/arton_net - 副本.py", "repo_name": "songhao8080/demo_git", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom bs4 import BeautifulSoup as bs4\nimport socket,re\nsocket.setdefaulttimeout(5)\nfrom scrapy.http import Request\nfrom yachang.items import YachangItem\nimport sys\nimport pymongo\nreload(sys)\nsys.setdefaultencoding('utf-8')\nclass ArtonNetSpider(scrapy.Spider):\n name = \"bobao\"\n allowed_domains = [\"shop.artxun.com\"]\n start_urls = ['http://shop.artxun.com/']\n\n def parse(self, response):\n soup = bs4(response.body,'lxml')\n item = YachangItem()\n item['title']=re.search('<title>(.*?)</title>',response.body,re.S).group(1)\n item['url']=response.url \n item['st']=response.status\n # print response.headers\n # item['keywords']=re.search('<meta name=\"keywords\" content=\"(.*?)\"/>',response.body,re.S).group(1)\n # item['desc']=re.search('<meta name=\"description\" content=\"(.*?)\"/>',response.body,re.S).group(1)\n yield item\n for i in soup.find_all('a'):\n link = str(i.get('href'))\n if link.startswith('http://shop.artxun.com'):\n # print i.get('href')\n url = i.get('href')\n if 'net?id=' in url:\n pass\n else:\n \n try:\n yield Request(url, callback=self.parse)\n except Exception as e:\n pass\n \n else:\n url = 'http://shop.artxun.com'+ link\n # print \"++++++++++++++++++++\"\n # print url\n if 'net?id=' in url:\n pass\n else:\n \n try:\n yield Request(url, callback=self.parse)\n except Exception as e:\n pass\n \n" }, { "alpha_fraction": 0.5896226167678833, "alphanum_fraction": 0.6367924809455872, "avg_line_length": 19.200000762939453, "blob_id": "6869fc07c7116853b45985d9f9b2b37da5edaa1d", "content_id": "20747ea8c19bd95bc235b688a5279ece3eb9064e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 212, "license_type": "no_license", "max_line_length": 79, "num_lines": 10, "path": "/01.py", "repo_name": "songhao8080/demo_git", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\n#coding:utf-8\r\nimport sys\r\n\r\nreload(sys)\r\nsys.setdefaultencoding(\"utf-8\")\r\n\r\n\r\nfor x in xrange(1,1712893):\r\n open('users.txt','a+').write(\"http://blog.artron.net/space-%d.html\"%x+\"\\n\")\r\n" }, { "alpha_fraction": 0.5692771077156067, "alphanum_fraction": 0.5712851285934448, "avg_line_length": 29.125, "blob_id": "f9ca95d4000e0eb43d5c27c05dacc0f351977664", "content_id": "3c1b253659e0f6b7cbd52debd085c4e3a7d6171a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 996, "license_type": "no_license", "max_line_length": 59, "num_lines": 32, "path": "/yachang/mongo.py", "repo_name": "songhao8080/demo_git", "src_encoding": "UTF-8", "text": "# coding:utf-8\r\nimport pymongo\r\nfrom scrapy.exceptions import DropItem\r\nfrom scrapy.conf import settings\r\nfrom scrapy import log\r\nfrom pymongo import MongoClient\r\nHOST = settings[\"HOST\"]\r\nPORT = settings[\"PORT\"]\r\nDB = settings[\"DB\"]\r\nCOLLECTION = settings[\"COLLECTION\"]\r\n\r\nclass MongoDBPipeline(object):\r\n def __init__(self):\r\n connection = 
MongoClient(\r\n HOST,\r\n PORT\r\n )\r\n db = connection[DB]\r\n self.collection = db[COLLECTION]\r\n self.ids_seen = set()\r\n def process_item(self, item, spider):\r\n valid = True\r\n for data in item:\r\n if not data:\r\n valid = False\r\n raise DropItem(\"Missing {0}!\".format(data))\r\n if item['url'] not in self.ids_seen:\r\n self.collection.insert(dict(item))\r\n self.ids_seen.add(item['url'])\r\n log.msg(\"Question added to MongoDB database!\",\r\n level=log.DEBUG, spider=spider)\r\n return item\r\n" }, { "alpha_fraction": 0.5176169276237488, "alphanum_fraction": 0.5573350191116333, "avg_line_length": 37.025001525878906, "blob_id": "a800cb4686275b2d455e3a21109c05578db978e5", "content_id": "0682de5b1632e8cc4538e42d3718dc3338664faf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1587, "license_type": "no_license", "max_line_length": 176, "num_lines": 40, "path": "/get_blog_rizhi.py", "repo_name": "songhao8080/demo_git", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\n#coding:utf-8\r\nimport sys,re,requests\r\n\r\nreload(sys)\r\nsys.setdefaultencoding(\"utf-8\")\r\nurl = \"http://blog.artron.net/space-183160.html\"\r\n# http://blog.artron.net/space-183160.html\r\n\r\ndef getHtml(user_id):\r\n url = \"http://blog.artron.net/space-%d.html\"%user_id\r\n html = requests.get(url).content\r\n # print html\r\n # user_id = 183160\r\n try:\r\n zonglogs= re.search('''日志</a><em>\\((.*?)\\)</em></li>''', html, re.S).group(1)\r\n if zonglogs:\r\n print divmod(int(zonglogs), 10)[0]\r\n if divmod(int(zonglogs), 10)[1]>0:\r\n pages = divmod(int(zonglogs), 10)[0]+1\r\n for page in range(1,pages+1):\r\n user_log_url = \"http://blog.artron.net/space.php?uid=%d&do=blog&view=me&page=%d&ajaxdiv=maincontent&inajax=1&ajaxtarget=maincontent&inajax=1\"%(user_id,page)\r\n userlog = requests.get(user_log_url).content\r\n for pageurl in re.findall(r'<h4><a href=\"(.*?)\" target=\"_blank\" >', userlog, re.S):\r\n open('pageurls.txt','a+').write(pageurl+\"\\n\")\r\n except Exception as e:\r\n pass\r\n\r\n # print re.findall(r'<h4><a href=\"(.*?)\" target=\"_blank\" >',userlog, re.S)\r\n\r\n'''\r\nonclick=\"getindex('blog');\">日志</a><em>(106)</em></li>\r\n<a href=\"javascript:;\" onclick=\"getindex('blog');\">日志</a>\r\n</span>\r\n<h4><a href=\"http://blog.artron.net/space-183160-do-blog-id-1040311.html\" target=\"_blank\">今日美术馆照片</a></h4>\r\n'''\r\n\r\nif __name__ == '__main__':\r\n for i in xrange(10,1000):\r\n getHtml(i)\r\n" }, { "alpha_fraction": 0.5876106023788452, "alphanum_fraction": 0.5893805027008057, "avg_line_length": 30.41176414489746, "blob_id": "53ff0716734d940630146c470081b552ee37108a", "content_id": "0bb5078c0f2cabf6abd61e722b417be91fc5e7e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 565, "license_type": "no_license", "max_line_length": 72, "num_lines": 17, "path": "/yachang/middlewares.py", "repo_name": "songhao8080/demo_git", "src_encoding": "UTF-8", "text": "#!/usr/bin/python \r\n#-*-coding:utf-8-*- \r\nfrom agents import AGENTS\r\nimport random \r\nfrom scrapy.downloadermiddlewares.useragent import UserAgentMiddleware \r\n \r\nclass RotateUserAgentMiddleware(UserAgentMiddleware): \r\n def __init__(self, user_agent=''): \r\n self.user_agent = user_agent \r\n \r\n def process_request(self, request, spider): \r\n ua = random.choice(AGENTS) \r\n if ua: \r\n # print ua \r\n request.headers.setdefault('User-Agent', ua)\r\n else:\r\n print \"no user agents\" \r\n \r\n 
" }, { "alpha_fraction": 0.6027820706367493, "alphanum_fraction": 0.6213291883468628, "avg_line_length": 33.078948974609375, "blob_id": "d386492f265c7ca8c1a5325a1110190e81ffc2a7", "content_id": "921299929c3e1721d60c24f2d6db589c17a6519a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1294, "license_type": "no_license", "max_line_length": 118, "num_lines": 38, "path": "/yachang/spiders/arton_net.py", "repo_name": "songhao8080/demo_git", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom bs4 import BeautifulSoup as bs4\nimport socket,re,requests\nfrom lxml import etree\nsocket.setdefaulttimeout(5)\nfrom scrapy.http import Request\nfrom yachang.items import YachangItem\nimport sys,re\nimport chardet\nimport pymongo\nreload(sys)\nsys.setdefaultencoding('utf-8')\nclass ArtonNetSpider(scrapy.Spider):\n\n item = YachangItem()\n name = \"luntan\"\n allowed_domains = [\"blog.artron.net\"]\n # start_urls = []\n # for cateurl in open('users.txt','r'):\n # start_urls.append(cateurl.strip('\\n'))\n\n # def get_or_not(self):\n # try:\n # item['title']=re.search('<title>(.*?)</title>',response.body,re.S).group(1).decode('gbk').encode('utf8')\n # except Exception as e:\n # item['title']= 'no data'\n def start_requests(self):\n for x in xrange(1, 1712893, 1):\n url = \"http://www.jiemian.com/article/%d.html\" % x\n #http://www.jiemian.com/article/492490.html\n yield scrapy.Request(url=url, callback=self.parse)\n def parse(self, response):\n # soup = bs4(response.body,'lxml')\n item = YachangItem()\n title= re.search('<title>(.*?)</title>',response.body,re.S).group(1)\n #print response.url\n open(\"titles.txt\",'a+').write(title+\"\\n\")" } ]
6
ericrosenbrown/robot_song
https://github.com/ericrosenbrown/robot_song
1eba71262c8c312eaba9abaf0536aa39dc913c30
0a22ce1d60af1c0a35f87e892f0f81f649eb4f43
4491f0c4320a114fee8c7c16eb0d79b11b580c39
refs/heads/master
2015-09-26T10:31:22.656929
2015-09-22T03:27:16
2015-09-22T03:27:16
42,827,487
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6856136918067932, "alphanum_fraction": 0.7671025991439819, "avg_line_length": 21.590909957885742, "blob_id": "0988f27f9581f6dab02cc5cd0cecc7cba6d6ef50", "content_id": "2aa3d7493ca77d5677a994bcbcbe36fd710f0c61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1988, "license_type": "no_license", "max_line_length": 61, "num_lines": 88, "path": "/scripts/test2.py", "repo_name": "ericrosenbrown/robot_song", "src_encoding": "UTF-8", "text": "#Import the library\nfrom midiutil.MidiFile import MIDIFile\nimport time\nimport wave\n\n# Create the MIDIFile Object with 1 track\nMyMIDI = MIDIFile(1)\n \n\n# Tracks are numbered from zero. Times are measured in beats.\ntrack = 0 \ntime = 0\ntempo = 117\n \n\n# Add track name and tempo.\nMyMIDI.addTrackName(track,time,\"Sample Track\")\nMyMIDI.addTempo(track,time,tempo)\n \n\n# Add a note. addNote expects the following information:\nchannel = 0\nC4 = 60\nD4 = 62\nE4 = 64\nF4 = 65\nG4 = 67\nA4 = 69\nC5 = 72\n\nvolume = 100\n\nMyMIDI.addProgramChange(track,channel,time,1)\n\n#Measure 1\nMyMIDI.addNote(track,channel,C4,0,1,volume)\n\nMyMIDI.addNote(track,channel,F4,1,2,volume)\nMyMIDI.addNote(track,channel,F4,2,2.5,volume)\nMyMIDI.addNote(track,channel,F4,2.5,3,volume)\n\nMyMIDI.addNote(track,channel,F4,3,4,volume)\nMyMIDI.addNote(track,channel,A4,4,5,volume)\n\nMyMIDI.addNote(track,channel,C5,5,6,volume)\nMyMIDI.addNote(track,channel,A4,6,7,volume)\n\n#Measure 2\nMyMIDI.addNote(track,channel,F4,7,9,volume)\n\nMyMIDI.addNote(track,channel,G4,9,10,volume)\nMyMIDI.addNote(track,channel,G4,10,11,volume)\n\nMyMIDI.addNote(track,channel,G4,11,13,volume)\n\nMyMIDI.addNote(track,channel,E4,13,14,volume)\nMyMIDI.addNote(track,channel,D4,14,15,volume)\n\n#Measure 3\nMyMIDI.addNote(track,channel,C4,15,16,volume)\nMyMIDI.addNote(track,channel,C4,16,17,volume)\n\nMyMIDI.addNote(track,channel,F4,17,18,volume)\nMyMIDI.addNote(track,channel,F4,18,18.5,volume)\nMyMIDI.addNote(track,channel,F4,18.5,19,volume)\n\nMyMIDI.addNote(track,channel,F4,19,20,volume)\nMyMIDI.addNote(track,channel,A4,20,21,volume)\n\nMyMIDI.addNote(track,channel,C5,21,22,volume)\nMyMIDI.addNote(track,channel,A4,22,23,volume)\n\n#Measure 4\nMyMIDI.addNote(track,channel,F4,23,24,volume)\nMyMIDI.addNote(track,channel,F4,24,25,volume)\n\nMyMIDI.addNote(track,channel,G4,25,27,volume)\n\nMyMIDI.addNote(track,channel,C4,27,29,volume)\n\nMyMIDI.addNote(track,channel,F4,29,33,volume)\n\n\n\n# And write it to disk.\nbinfile = open(\"output.mid\", 'wb')\nMyMIDI.writeFile(binfile)\nbinfile.close()\n" }, { "alpha_fraction": 0.6229190230369568, "alphanum_fraction": 0.6394051909446716, "avg_line_length": 28.74519157409668, "blob_id": "978d75d398298211f4d84e0c9eedb4276fd34bca", "content_id": "d51c085affe09796cb8ebe0aa40c1e3ec2866361", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6187, "license_type": "no_license", "max_line_length": 90, "num_lines": 208, "path": "/scripts/ein_client_test.py", "repo_name": "ericrosenbrown/robot_song", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport rospy\nimport std_msgs\nimport roslib\n#roslib.load_manifest(\"baxter_pick_and_place\")\nimport readline\nimport time\nimport pygame\nimport thread\nfrom ein.msg import EinState\n\nreadline.parse_and_bind('tab: complete')\nreadline.parse_and_bind('set editing-mode emacs')\n\n\n\nclass SimpleCompleter(object):\n \n def __init__(self, options):\n self.options = sorted(options)\n 
return\n\n def complete(self, text, state):\n response = None\n if state == 0:\n # This is the first time for this text, so build a match list.\n if text:\n self.matches = [s \n for s in self.options\n if s and s.startswith(text)]\n else:\n self.matches = self.options[:]\n \n # Return the state'th item from the match list,\n # if we have that many.\n try:\n response = self.matches[state]\n except IndexError:\n response = None\n return response\n\n\nclass EinClient:\n def __init__(self, words, publish_topic, state_topic):\n print \"publish topic: \", publish_topic\n print \"state topic: \", state_topic\n\n self.forth_command_publisher = rospy.Publisher(publish_topic, \n std_msgs.msg.String, queue_size=10)\n\n self.state_subscriber = rospy.Subscriber(state_topic, \n EinState, self.state_callback)\n self.state = None\n self.stack = []\n \n readline.set_completer(SimpleCompleter(words).complete)\n save_history_hook()\n\n def state_callback(self, msg):\n self.state = msg\n self.stack = self.state.stack\n\n def printStack(self):\n print \"Call Stack: \"\n for word in reversed(self.stack):\n print \" \".rjust(15), word\n def ultrasoundm(self):\n\tself.forth_command_publisher.publish(\".55 .83 .47 -.45 .83 -.24 .17 moveToEEPose ;\")\n\tself.forth_command_publisher.publish(\"xDown 20 replicateWord ;\")\n\n def IKm(self):\n\tself.forth_command_publisher.publish(\"assumeCrane1 ;\")\n\ttime.sleep(1)\n\tself.forth_command_publisher.publish(\"zDown 40 replicateWord ;\")\n\ttime.sleep(2.5)\n\tself.forth_command_publisher.publish(\"xDown 40 replicateWord ;\")\n\ttime.sleep(2)\n\tself.forth_command_publisher.publish(\"xUp 40 replicateWord ;\")\n\ttime.sleep(2)\n\tself.forth_command_publisher.publish(\"xUp 40 replicateWord ;\")\n\ttime.sleep(2)\n\tself.forth_command_publisher.publish(\"xDown 40 replicateWord ;\")\n\ttime.sleep(2)\n\tself.forth_command_publisher.publish(\"xUp 40 replicateWord ;\")\n\ttime.sleep(2)\n\tself.forth_command_publisher.publish(\"xDown 40 replicateWord ;\")\n\ttime.sleep(3)\n\tself.forth_command_publisher.publish(\"xUp 40 replicateWord ;\")\n\ttime.sleep(2)\n\tself.forth_command_publisher.publish(\"xDown 40 replicateWord ;\")\n def servosm(self):\n\tself.forth_command_publisher.publish(\"assumeBeeHome ;\")\n\ttime.sleep(2.5)\n\tself.forth_command_publisher.publish(\"xDown 20 replicateWord ;\")\n\ttime.sleep(2)\n\tself.forth_command_publisher.publish(\"xUp 20 replicateWord ;\")\n\ttime.sleep(2)\n\tself.forth_command_publisher.publish(\"zUp 20 replicateWord ;\")\n\ttime.sleep(2)\n\tself.forth_command_publisher.publish(\"zDown 20 replicateWord ;\")\n\ttime.sleep(2)\n\tself.forth_command_publisher.publish(\"yUp 20 replicateWord ;\")\n\ttime.sleep(2)\n\tself.forth_command_publisher.publish(\"yDown 20 replicateWord ;\")\n\ttime.sleep(3)\n\tself.forth_command_publisher.publish(\"xDown 20 replicateWord ;\")\n\ttime.sleep(2)\n\tself.forth_command_publisher.publish(\"xUp 20 replicateWord ;\")\n def grippersm(self):\n\tself.forth_command_publisher.publish(\"assumeHandingPose ;\")\n\ttime.sleep(2)\n self.forth_command_publisher.publish(\"closeGripper ;\")\n\ttime.sleep(2.5)\n\tself.forth_command_publisher.publish(\"openGripper ;\")\n\ttime.sleep(1)\n\tself.forth_command_publisher.publish(\"closeGripper ;\")\n\ttime.sleep(1)\n\tself.forth_command_publisher.publish(\"openGripper ;\")\n\ttime.sleep(1)\n\tself.forth_command_publisher.publish(\"closeGripper ;\")\n\ttime.sleep(1)\n\tself.forth_command_publisher.publish(\"openGripper 
;\")\n\ttime.sleep(1)\n\tself.forth_command_publisher.publish(\"closeGripper ;\")\n\ttime.sleep(3)\n\tself.forth_command_publisher.publish(\"openGripper ;\")\n\ttime.sleep(1)\n\tself.forth_command_publisher.publish(\"closeGripper ;\")\n\t\n def ask(self):\n\tready = raw_input(\"press enter to start\")\n\t#thread.start_new_thread(gripperss,())\n\t#thread.start_new_thread(grippers,())\n\tself.grippersm()\n\t#thread.start_new_thread(servoss,())\n\t#thread.start_new_thread(servos,())\n\tself.servosm()\n\t#self.ultrasoundm()\n\tself.IKm()\n\n\"\"\"\n rospy.sleep(0.2)\n self.printStack()\n try:\n line = raw_input('Prompt (\"stop\" to quit): ')\n except EOFError:\n break\n \n if line == 'stop':\n break\n print 'ENTERED: \"%s\"' % line\n self.forth_command_publisher.publish(line);\"\"\"\ndef save_history_hook():\n import os\n histfile = os.path.join(os.path.expanduser(\"~\"), \".ein_client_history\")\n try:\n readline.read_history_file(histfile)\n except IOError:\n pass\n import atexit\n atexit.register(readline.write_history_file, histfile)\n\ndef grippers():\n\tpygame.init()\n\n\tpygame.mixer.music.load(\"grippers.wav\")\n\tpygame.mixer.music.play()\n\ndef gripperss():\n\tpygame.mixer.init(44100, -16, 2, 1024)\n\tpygame.mixer.music.load(\"grippers.mid\")\n\tpygame.mixer.music.play()\n\ndef servos():\n\tservosSong = pygame.mixer.Sound(\"servos.wav\")\n\n\tservosSong.play()\n\ndef servoss():\n\tpygame.mixer.music.load(\"servos.mid\")\n\tpygame.mixer.music.play()\n\ndef main():\n import sys\n if (len(sys.argv) != 2):\n print \"usage: ein_client.py left|right\"\n return\n\n arm = sys.argv[1]\n\n rospy.init_node(\"ein_client_%s\" % arm, anonymous=True)\n words = []\n for wordline in open(\"ein_words.txt\"):\n words.append(wordline.split(\" \")[0]) \n print words\n\n\n client = EinClient(words, \n \"/ein/%s/forth_commands\" % arm,\n \"/ein_%s/state\" % arm,\n )\n\n client.ask()\n\n\n \nif __name__=='__main__':\n main()\n" }, { "alpha_fraction": 0.5277327299118042, "alphanum_fraction": 0.5309762954711914, "avg_line_length": 25.127119064331055, "blob_id": "2a3337541c3ee6c6defa76d254b58a3c19dc007c", "content_id": "02f599128ed2295ebf37c795f8ec84ae2350b2cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3083, "license_type": "no_license", "max_line_length": 90, "num_lines": 118, "path": "/scripts/ein_client.py", "repo_name": "ericrosenbrown/robot_song", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport rospy\nimport std_msgs\nimport roslib\n#roslib.load_manifest(\"baxter_pick_and_place\")\nimport readline\n\nfrom ein.msg import EinState\n\nreadline.parse_and_bind('tab: complete')\nreadline.parse_and_bind('set editing-mode emacs')\n\n\n\nclass SimpleCompleter(object):\n \n def __init__(self, options):\n self.options = sorted(options)\n return\n\n def complete(self, text, state):\n response = None\n if state == 0:\n # This is the first time for this text, so build a match list.\n if text:\n self.matches = [s \n for s in self.options\n if s and s.startswith(text)]\n else:\n self.matches = self.options[:]\n \n # Return the state'th item from the match list,\n # if we have that many.\n try:\n response = self.matches[state]\n except IndexError:\n response = None\n return response\n\n\nclass EinClient:\n def __init__(self, words, publish_topic, state_topic):\n print \"publish topic: \", publish_topic\n print \"state topic: \", state_topic\n\n self.forth_command_publisher = rospy.Publisher(publish_topic, \n std_msgs.msg.String, 
queue_size=10)\n\n self.state_subscriber = rospy.Subscriber(state_topic, \n EinState, self.state_callback)\n self.state = None\n self.stack = []\n \n readline.set_completer(SimpleCompleter(words).complete)\n save_history_hook()\n\n def state_callback(self, msg):\n self.state = msg\n self.stack = self.state.stack\n\n def printStack(self):\n print \"Call Stack: \"\n for word in reversed(self.stack):\n print \" \".rjust(15), word\n\n\n def ask(self):\n\n while True:\n rospy.sleep(0.2)\n self.printStack()\n try:\n line = raw_input('Prompt (\"stop\" to quit): ')\n except EOFError:\n break\n \n if line == 'stop':\n break\n print 'ENTERED: \"%s\"' % line\n self.forth_command_publisher.publish(line);\n\ndef save_history_hook():\n import os\n histfile = os.path.join(os.path.expanduser(\"~\"), \".ein_client_history\")\n try:\n readline.read_history_file(histfile)\n except IOError:\n pass\n import atexit\n atexit.register(readline.write_history_file, histfile)\n\ndef main():\n import sys\n if (len(sys.argv) != 2):\n print \"usage: ein_client.py left|right\"\n return\n\n arm = sys.argv[1]\n\n rospy.init_node(\"ein_client_%s\" % arm, anonymous=True)\n words = []\n for wordline in open(\"ein_words.txt\"):\n words.append(wordline.split(\" \")[0])\n \n print words\n\n\n client = EinClient(words, \n \"/ein/%s/forth_commands\" % arm,\n \"/ein_%s/state\" % arm,\n )\n\n client.ask()\n\n\n \nif __name__=='__main__':\n main()\n" } ]
3
Richie1710/corona-rki--prometheus-exporter
https://github.com/Richie1710/corona-rki--prometheus-exporter
643fbb1b07368a5dd9ca492006af486e44db85d5
a002572b25b96d41b7d2fa81ac1af915f3dd5fc5
77597c4bc3c1c784f81a16a4995a8d3ebb3445f3
refs/heads/main
2023-04-19T03:13:44.529704
2021-04-01T07:48:54
2021-04-01T07:48:54
352,879,313
1
0
MIT
2021-03-30T05:21:04
2021-04-01T07:48:56
2021-04-01T07:48:54
Python
[ { "alpha_fraction": 0.5987915396690369, "alphanum_fraction": 0.6250755190849304, "avg_line_length": 32.43434524536133, "blob_id": "eef2fd3ffe183737df9c93a5a4151a4f5b6b750c", "content_id": "4ee4193efdb937ad9f0224815d38dc1c804a6862", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3312, "license_type": "permissive", "max_line_length": 121, "num_lines": 99, "path": "/corona_rki_prometheus_exporter/exporter.py", "repo_name": "Richie1710/corona-rki--prometheus-exporter", "src_encoding": "UTF-8", "text": "\"\"\"\nProvides an exporter for corona data\n\"\"\"\n\nimport time\nimport sys\nimport argparse\nimport requests # pylint: disable=E0401 disable because of not checking external libaries\nfrom prometheus_client import ( # type: ignore # pylint: disable=E0401 disable because of not checking external libaries\n Gauge,\n start_http_server,\n REGISTRY,\n)\n\n\ndef parse_arguments(arguments):\n \"\"\"Parse Arguments\n\n Args:\n args (sys.argv): Arguments of script call\n \"\"\"\n parser = argparse.ArgumentParser(description=\"get corona inzidenz from RKI\")\n parser.add_argument(\"gen\", type=str, help=\"name of state\", default=\"Bautzen\")\n return parser.parse_args(arguments)\n\n\ndef getcorona_information_from_rki(gen: str = \"Bautzen\") -> str:\n \"\"\"Get Corona Information from RKI and returns as json\n\n Args:\n gen (str): Name of district to search for\n\n Returns:\n any: json data\n \"\"\"\n url = \"https://services7.arcgis.com/mOBPykOjAyBO2ZKk/arcgis/rest/services/\\\nRKI_Landkreisdaten/FeatureServer/0/query?where=GEN%20%3D%20'{}'&\\\noutFields=EWZ_BL,EWZ,cases_per_population,cases,deaths,death_rate,\\\ncases7_per_100k,cases7_bl_per_100k,cases7_bl,death7_bl,cases7_lk,death7_lk,\\\ncases7_per_100k_txt&returnGeometry=false&outSR=4326&f=json\".format(\n gen\n )\n req = requests.get(url)\n req.raise_for_status()\n return req.json()\n\n\ndef process_request(gaugename: Gauge, api_name: str, corona_data=None):\n \"\"\"Write Data to gauge\n\n Args:\n gaugename (Gauge): Prometheus Gauge\n api_name (str): Name of API in RKI Json Data\n corona_data (str(json)), optional): Json Data from RKI. If no data \\\n given it will fetch new. 
Defaults to None.\n \"\"\"\n if not corona_data:\n corona_data = getcorona_information_from_rki()\n featuredata = corona_data[\"features\"][0][\"attributes\"]\n gaugename.set(featuredata[api_name])\n\n\nif __name__ == \"__main__\":\n\n args = parse_arguments(sys.argv[1:])\n # No other way to remove python gc values from exporter\n for coll in list(REGISTRY._collector_to_names.keys()): # pylint: disable=W0212\n REGISTRY.unregister(coll)\n EWZ = Gauge(\"EWZ_{}\".format(args.gen).replace(\"-\", \"_\"), \"Einwohnerzahl {}\".format(args.gen))\n EWZ_BL = Gauge(\"EWZ_BL\", \"Einwohnerzahl Bundesland\")\n cases = Gauge(\"Coronafaelle_{}\".format(args.gen).replace(\"-\", \"_\"), \"Coronafälle in {}\".format(args.gen),)\n death = Gauge(\"Todesfaelle_{}\".format(args.gen).replace(\"-\", \"_\"), \"Todesfälle {}\".format(args.gen),)\n cases7_per_100k = Gauge(\n \"Inzidenz_{}\".format(args.gen).replace(\"-\", \"_\"),\n \"Inzidenzwert auf 100.000 \\\n Einwohner {}\".format(\n args.gen\n ).replace(\n \"-\", \"_\"\n ),\n )\n cases7_bl_per_100k = Gauge(\n \"Inzidenz_BL\",\n \"Inzidenzwert auf 100.000 \\\n Einwohner BL\",\n )\n start_http_server(8000)\n while True:\n corona_json = getcorona_information_from_rki(args.gen)\n for gauge in (\n (EWZ, \"EWZ\"),\n (EWZ_BL, \"EWZ_BL\"),\n (cases, \"cases\"),\n (death, \"deaths\"),\n (cases7_per_100k, \"cases7_per_100k\"),\n (cases7_bl_per_100k, \"cases7_bl_per_100k\"),\n ):\n process_request(gauge[0], gauge[1], corona_json)\n time.sleep(300)\n" }, { "alpha_fraction": 0.5975367426872253, "alphanum_fraction": 0.6432260870933533, "avg_line_length": 34.9571418762207, "blob_id": "37e16e6283f9b5768b45460fc757f6f287d4fb8c", "content_id": "b667e854a8f5f7bffd62cfe6a980e5132cb1fcb4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2519, "license_type": "permissive", "max_line_length": 191, "num_lines": 70, "path": "/corona_rki_prometheus_exporter/tests/test_exporter.py", "repo_name": "Richie1710/corona-rki--prometheus-exporter", "src_encoding": "UTF-8", "text": "\"\"\"\nThis file keeps tests for corona_rki_prometheus_exporter\n\"\"\"\n\n\nimport exporter # noqa: E402 # pylint: disable=C0413\nfrom prometheus_client import Gauge # type: ignore # pylint: disable=C0413,E0401 # noqa:402\n\n\ndef test_getcorona_information_from_rki():\n \"\"\"\n Tests the getting of information from rki\n \"\"\"\n response_bautzen = exporter.getcorona_information_from_rki(\"Bautzen\")\n assert len(response_bautzen[\"features\"]) > 0\n response_noneexisting = exporter.getcorona_information_from_rki(\n \"Not \\\n existing State\"\n )\n assert len(response_noneexisting[\"features\"]) == 0\n response_eichsfeld = exporter.getcorona_information_from_rki(\"Eichsfeld\")\n assert len(response_eichsfeld[\"features\"]) > 0\n assert response_eichsfeld[\"features\"] != response_bautzen[\"features\"]\n\n\ndef test_argument_parseing():\n \"\"\"Tests the argument parsing\n \"\"\"\n args = exporter.parse_arguments([\"TEST\"])\n assert args.gen == \"TEST\"\n\n\ndef test_process_request(mocker):\n \"\"\"Tests the request processing\n \"\"\"\n\n ewz = Gauge(\"EWZ_Bautzen\", \"Einwohnerzahl Bautzen\")\n ewz_bl = Gauge(\"EWZ_Sachsen\", \"Einwohnerzahl Sachsen\")\n cases = Gauge(\"Coronafaelle_Bautzen\", \"Coronafälle in Bautzen\")\n death = Gauge(\"Todesfaelle_Bautzen\", \"Todesfälle Bautzen\")\n cases7_per_100k = Gauge(\n \"Inzidenz_bautzen\",\n \"Inzidenzwert auf 100.000 \\\n Einwohner Bautzen\",\n )\n cases7_bl_per_100k = Gauge(\n 
\"Inzidenz_Sachsen\",\n \"Inzidenzwert auf 100.000 \\\n Einwohner Sachsen\",\n )\n mocker.patch(\n \"exporter.getcorona_information_from_rki\", return_value={\"features\": [{\"attributes\": {\"EWZ_BL\": 1, \"EWZ\": 2, \"cases\": 3, \"death\": 4, \"cases7_per_100k\": 5, \"cases7_bl_per_100k\": 6}}]},\n )\n\n for gauge in (\n (ewz, \"EWZ\"),\n (ewz_bl, \"EWZ_BL\"),\n (cases, \"cases\"),\n (death, \"death\"),\n (cases7_per_100k, \"cases7_per_100k\"),\n (cases7_bl_per_100k, \"cases7_bl_per_100k\"),\n ):\n exporter.process_request(gauge[0], gauge[1])\n # disable pylint W0212 because there is no getter function for getting values\n assert ewz_bl._value._value == 1 # pylint: disable=W0212\n assert ewz._value._value == 2 # pylint: disable=W0212\n assert cases._value._value == 3 # pylint: disable=W0212\n assert death._value._value == 4 # pylint: disable=W0212\n assert cases7_per_100k._value._value == 5 # pylint: disable=W0212\n assert cases7_bl_per_100k._value._value == 6 # pylint: disable=W0212\n" }, { "alpha_fraction": 0.6212121248245239, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 15.5, "blob_id": "f1f26df0dc5d03e390b9913481f0638ed7058423", "content_id": "3bc4b94193f6123b90b06d83dd1de4f4726b11f5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 132, "license_type": "permissive", "max_line_length": 30, "num_lines": 8, "path": "/pyproject.toml", "repo_name": "Richie1710/corona-rki--prometheus-exporter", "src_encoding": "UTF-8", "text": "[tool.pylint.messages_control]\ndisable = \"C0330, C0326\"\n\n[tool.pylint.format]\nmax-line-length = 200\n\n[tool.black]\nline-length = 200\n" }, { "alpha_fraction": 0.7832167744636536, "alphanum_fraction": 0.7832167744636536, "avg_line_length": 19.571428298950195, "blob_id": "190e5fd2a86b87e6a85180e9d7ca54795bb03098", "content_id": "9c3c7895b36cb64668e52b400b015ba9a6a51142", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 143, "license_type": "permissive", "max_line_length": 58, "num_lines": 7, "path": "/README.md", "repo_name": "Richie1710/corona-rki--prometheus-exporter", "src_encoding": "UTF-8", "text": "# corona-rki--prometheus-exporter\nexports data from RKI API and provides prometheus exporter\n\n\n# Usage\n\npython .\\exporter.py \"Rhein-Sieg-Kreis\"" }, { "alpha_fraction": 0.7029703259468079, "alphanum_fraction": 0.7227723002433777, "avg_line_length": 19.200000762939453, "blob_id": "d19f3eaeae78ffce0b6c3badc65a2463c94a1803", "content_id": "79b7655f33e087e69203ae863a6cb796179a6f09", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 101, "license_type": "permissive", "max_line_length": 57, "num_lines": 5, "path": "/setup.py", "repo_name": "Richie1710/corona-rki--prometheus-exporter", "src_encoding": "UTF-8", "text": "from distutils.core import setup\n\nsetup(\n name=\"corona-rki-prometheus-exporter\", version=\"1.0\",\n)\n" } ]
5
bera5186/Amazon-Price-tracker
https://github.com/bera5186/Amazon-Price-tracker
dca82f2f49998f29b8a753418f2fff28b3826e5f
8496598c7eeaab5fbd895985f4a9ac60be1d456e
ecd6df36c46cc560f4e6c8c40ee36dc6025ba730
refs/heads/master
2020-08-07T22:26:05.888441
2019-10-07T18:22:39
2019-10-07T18:22:39
213,605,149
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5387365818023682, "alphanum_fraction": 0.6162097454071045, "avg_line_length": 31, "blob_id": "f304165007f4ccc5df1b95d11a8260614f90b2ae", "content_id": "131da8931584d2f2d8d44304642c1ebfae548830", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 842, "license_type": "no_license", "max_line_length": 293, "num_lines": 26, "path": "/src/Scrape.py", "repo_name": "bera5186/Amazon-Price-tracker", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup\nimport requests\nimport re\n\nclass Scraper:\n \n def __init__(self, url):\n self.url = url\n\n def scrape(self):\n \n result = self.url.find('www.amazon.in')\n\n if(result == -1):\n return 'Not an Amazon link 🔥'\n else:\n page = requests.get(self.url)\n \n soup = BeautifulSoup(page.text, 'html.parser')\n price = soup.find('span', {'id' : 'priceblock_dealprice'}).text\n return price\n\n\n \nobj = Scraper('https://www.amazon.in/Samsung-Galaxy-Black-Storage-Offers/dp/B0756ZBZ5P/ref=sr_1_1?_encoding=UTF8&pf_rd_i=desktop&pf_rd_m=A1VBAL9TL5WCBF&pf_rd_p=4430387b-ce1c-4c49-88fd-905b258011de&pf_rd_r=VFE3XHG96C1QDF7A500J&pf_rd_t=36701&qid=1565513852&s=gateway&smid=A1EWEIV3F4B24B&sr=8-1')\nprint(obj.scrape())\n \n\n\n" }, { "alpha_fraction": 0.5342953205108643, "alphanum_fraction": 0.541806697845459, "avg_line_length": 25.767196655273438, "blob_id": "9e99ba79a944a152f45aa5ce2da79c25ae90bb6a", "content_id": "155416a213b4713e8cf8f99c0ad1c166712c2935", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5059, "license_type": "no_license", "max_line_length": 89, "num_lines": 189, "path": "/api/main.py", "repo_name": "bera5186/Amazon-Price-tracker", "src_encoding": "UTF-8", "text": "\"\"\"\nauthor: Rahul Bera <[email protected]>\n\nMain API file\n\n\"\"\"\n\nfrom flask import Flask, jsonify, request, Response\nfrom flask_restful import Api, Resource\nfrom pymongo import MongoClient\nimport bcrypt\nimport json\n\napp = Flask(__name__)\napi = Api(app)\n\n# Database Settings\ntry:\n conn = MongoClient(\"mongodb://localhost:27017\")\n db = conn.amazon_price_tracker\n productCollection = db.products\n usersCollection = db.users\n print(\"db connected\")\nexcept:\n print(\"error connecting to db\")\n\n\nclass CreateUser(Resource):\n def post(self):\n\n \"\"\"\n Create a user in a database\n \n \"\"\"\n postedData = request.get_json()\n email = postedData[\"email\"]\n userName = postedData[\"username\"]\n password = postedData[\"password\"]\n\n checkedEmail = usersCollection.find_one({\"email\": email})\n\n if checkedEmail is None:\n hashedPassword = bcrypt.hashpw(password.encode(\"utf-8\"), bcrypt.gensalt(8))\n\n userDocument = {\n \"email\": email,\n \"password\": hashedPassword,\n \"username\": userName,\n }\n\n usersCollection.insert_one(userDocument)\n\n try:\n js = json.dumps(\n {\"message\": \"User succesfully created\", \"success\": True}\n )\n response = Response(js, status=201, mimetype=\"application/json\")\n except:\n js = json.dumps({\"message\": \"Cannot create a user\", \"success\": False})\n response = Response(js, status=500, mimetype=\"application/json\")\n\n return response\n else:\n js = json.dumps({\"message\": \"Email already taken\", \"success\": False})\n\n response = Response(js, status=400, mimetype=\"application/json\")\n return response\n\n\n\nclass GetUserForLogin(Resource):\n def get(self):\n \"\"\"\n Check for a user in database\n \n \"\"\"\n\n postedData = request.get_json()\n email = 
postedData[\"email\"]\n password = postedData[\"password\"]\n\n dbEmail = usersCollection.find_one({\"email\": email})\n\n if dbEmail is None:\n js = json.dumps(\n {\"message\": \"Incorrect Email or Password\", \"success\": False}\n )\n\n reponse = Response(js, status=404, mimetype=\"application/json\")\n return reponse\n else:\n if (\n bcrypt.hashpw(password.encode(\"utf-8\"), dbEmail[\"password\"])\n == dbEmail[\"password\"]\n ):\n js = json.dumps({\"message\": \"successfully logged in\", \"sucess\": True})\n\n response = Response(js, status=200, mimetype=\"application/json\")\n return response\n\n else:\n js = json.dumps(\n {\"message\": \"Incorrect email or password\", \"sucess \": False}\n )\n\n response = Response(js, status=404, mimetype=\"application/json\")\n return response\n\n\nclass GetUserName(Resource):\n def get(self):\n \"\"\"\n API endpoint to get a single user\n \n \"\"\"\n\n postedData = request.get_json()\n email = postedData[\"email\"]\n\n user = usersCollection.find_one({\"email\": email})\n\n if user is None:\n js = json.dumps(\n {\n \"message\": \"User with email\" + email + \" not found in database\",\n \"success\": False,\n }\n )\n\n response = Response(js, status=404, mimetype=\"application/json\")\n return response\n\n else:\n\n js = json.dumps({\"data\": user[\"username\"], \"success\": True})\n\n response = Response(js, status=200, mimetype=\"application/json\")\n return response\n\n\nclass CreateProduct(Resource):\n def post(self):\n \n \"\"\"\n API endpoint to create a product of a user\n \n \"\"\"\n\n postedData = request.get_json()\n email = postedData['email']\n link = postedData['link']\n price = postedData['price']\n\n user = usersCollection.find_one({\"email\": email})\n\n if user:\n\n productDocument = {\n 'email' : email, \n 'link' : link, \n 'price' : price\n }\n productCollection.insert_one(productDocument)\n\n js = json.dumps({'message' : 'Product created succesfully', 'success': True})\n response = Response(js, status=200, mimetype=\"application/json\")\n return response\n\n else :\n js = json.dumps({'message' : 'User not found', 'success': False})\n response = Response(js, status=403, mimetype=\"application/json\")\n return response\n\n\n\n\napi.add_resource(CreateUser, \"/signup\")\napi.add_resource(GetUserForLogin, \"/signin\")\napi.add_resource(GetUserName, \"/getuser\")\napi.add_resource(CreateProduct, \"/createproduct\")\n\n\[email protected](\"/\")\ndef home():\n return \"Welcome to API Home\"\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n" }, { "alpha_fraction": 0.5798946619033813, "alphanum_fraction": 0.6000878214836121, "avg_line_length": 26.445783615112305, "blob_id": "6ce68546c1dd115d537dd053ea56c6123f22791a", "content_id": "527b874565e98d799a13609f1986de1f96befda7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2278, "license_type": "no_license", "max_line_length": 84, "num_lines": 83, "path": "/src/app.py", "repo_name": "bera5186/Amazon-Price-tracker", "src_encoding": "UTF-8", "text": "from flask import Flask, redirect, render_template, session, url_for, request, flash\nimport requests as r\nimport time\n\napp = Flask(__name__)\n\n\[email protected](\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n\n if request.method == \"POST\":\n email = request.form[\"email\"]\n password = request.form[\"password\"]\n print(email, password)\n payload = {\"email\": email, \"password\": password}\n\n response = r.get(\"http://127.0.0.1:5000/signin\", json=payload)\n 
print(response.status_code)\n\n if response.status_code == 200:\n userName = r.get(\"http://127.0.0.1:5000/getuser\", json={\"email\": email})\n print(userName.json()[\"data\"])\n session[\"username\"] = userName.json()[\"data\"]\n session[\"email\"] = email\n return redirect(url_for(\"home\"))\n\n return render_template(\"login.html\")\n\n\[email protected](\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n if request.method == \"POST\":\n email = request.form[\"email\"]\n password = request.form[\"password\"]\n username = request.form[\"username\"]\n\n payload = {\"email\": email, \"username\": username, \"password\": password}\n\n response = r.post(\"http://127.0.0.1:5000/signup\", json=payload)\n\n if response.status_code == 201:\n flash(response.json()[\"message\"], \"success\")\n return redirect(url_for(\"login\"))\n\n else:\n flash(response.json()[\"message\"], \"warning\")\n\n return render_template(\"register.html\")\n\n\[email protected](\"/logout\")\ndef logout():\n session.clear()\n return redirect(url_for(\"home\"))\n\n\[email protected](\"/dashboard\")\ndef dashboard():\n if \"email\" not in session:\n return redirect(url_for(\"login\"))\n return \"this is dashboard\"\n\n\[email protected](\"/loginsucess\")\ndef sucess():\n if \"email\" in session:\n return render_template(\"loginsucess.html\", user=session[\"email\"])\n\n return render_template(\"loginsucess.html\")\n\n\[email protected](\"/\")\ndef home():\n if \"email\" in session:\n print(session[\"username\"])\n return render_template(\"base.html\", user=session[\"username\"])\n\n return render_template(\"base.html\")\n\n\nif __name__ == \"__main__\":\n app.config[\"SECRET_KEY\"] = \"this is @$%^&\"\n app.run(host=\"127.0.0.1\", port=8000, debug=True)\n" } ]
3
mat7lui/outcome_measures
https://github.com/mat7lui/outcome_measures
1bf70a3d255d504ab171086facf8b14b943e4145
744b67d4ec301c8a9fb11da2d5563c6587ea85e2
c538ce9bda7a36038def31a217355f78707e7cd5
refs/heads/master
2021-10-11T23:46:50.969798
2021-10-01T14:00:59
2021-10-01T14:00:59
228,916,181
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6235541701316833, "alphanum_fraction": 0.6508938074111938, "avg_line_length": 42.227272033691406, "blob_id": "83315ce9f5118f4bea1104444d5f0fe3cd3d23b8", "content_id": "ae1e8f48e7df9e1538f07fe0c456cfff1c02dd97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2853, "license_type": "no_license", "max_line_length": 159, "num_lines": 66, "path": "/data_scoring.py", "repo_name": "mat7lui/outcome_measures", "src_encoding": "UTF-8", "text": "import pandas as pd \nfrom datetime import datetime\n\n#### SNIPPETS ####\n# Bit to test for blank questions collected in the survey || pd.isnull(data.loc[1,'camm_1'])\n# Bit to convert each row in the df to a dict || dict_data = df.to_dict('index')\n\ndata = pd.read_csv(r'c:\\Users\\mlui-tankersley\\Outcome_Measures\\cleaned_data\\\\' + str(datetime.today().strftime('%m.%d.%Y')) + '.csv')\n\n\n# Dataframes containing the columns of the subscales of the Outcome Measures battery\ndemo_df = data.iloc[:,:4]\nders_df = data.iloc[:,4:20]\nari_df = data.iloc[:,20:27]\nceas_df = data.iloc[:,27:66]\ndts_df = data.iloc[:,66:81]\ncamm_df = data.iloc[:,81:]\n\n\n################ Difficulty in Emotion Regulation Scale ################\nders_score = ders_df.sum(1)\ndata.insert(loc=4, column='DERS_SCORE', value=ders_score)\n\n################ Affective Reactivity Index ################\nari_score = ari_df.iloc[:,:6].sum(1)\ndata.insert(loc=21, column='ARI_SCORE', value=ari_score)\n\n################ Compassionate Engagement and Action Scale ################\ndrop_questions = [\n 'comp_self_3', 'comp_self_7', 'comp_self_11', \n 'comp_from_3', 'comp_from_7', 'comp_from_11', \n 'comp_to_3', 'comp_to_7', 'comp_to_11', \n ]\n\nceas_df.drop(labels=drop_questions, axis=1, inplace=True)\nceas_self_score = ceas_df.iloc[:,:10].sum(1)\nceas_from_score = ceas_df.iloc[:,10:20].sum(1)\nceas_to_score = ceas_df.iloc[:,20:].sum(1)\n\n################ Distress Tolerance Scale ################\ndts_tolerance = dts_df.loc[:,['dts_1', 'dts_3', 'dts_5']].mean(1)\ndts_appraisal = dts_df.loc[:,['dts_6', 'dts_7', 'dts_9', 'dts_10', 'dts_11', 'dts_12']].mean(1)\ndts_absorption = dts_df.loc[:,['dts_2', 'dts_4', 'dts_15']].mean(1)\ndts_regulaton = dts_df.loc[:,['dts_8', 'dts_13', 'dts_14']].mean(1)\n\ndts_score = (dts_tolerance + dts_appraisal + dts_absorption + dts_regulaton) / 4\n\n#### Child and Adolescent Mindfulness Measure ####\ncamm_score = camm_df.sum(1)\n\n# Building new dataframe with the scores of each client\noutcome_measures_scores = pd.concat(\n [demo_df.loc[:,['last_name','first_name', 'assess_date']], ders_score, ari_score, ceas_self_score, ceas_from_score, ceas_to_score, dts_score, camm_score], \n axis=1)\n\n# Assigning names for the new column headers\nold_cols = outcome_measures_scores.columns\nnew_cols = [\n 'last_name','first_name','assessment_date','ders_score', 'ari_score', 'ceas_self_score', 'ceas_from_score', 'ceas_to_score', 'dts_score', 'camm_score'\n ]\nrenamed_cols = dict(zip(old_cols, new_cols))\noutcome_measures_scores.rename(columns=renamed_cols, inplace=True)\n\n# Exporting results to Outcome_Measures folder with unique name based on the date report was ran\noutput_file_path = r'c:\\Users\\mlui-tankersley\\Outcome_Measures\\scored_data\\\\' + str(datetime.today().strftime('%m.%d.%Y')) + '_SCORED.csv'\noutcome_measures_scores.to_csv(output_file_path, index=False)\n" }, { "alpha_fraction": 0.6117245554924011, "alphanum_fraction": 0.6382080912590027, "avg_line_length": 45.41666793823242, 
"blob_id": "33a345db21e7bafc655ad696847f5a8581ffa264", "content_id": "dfc66acebb11edfdb93c05a6b4a6ddfcb36ca8b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11139, "license_type": "no_license", "max_line_length": 206, "num_lines": 240, "path": "/measure_tools.py", "repo_name": "mat7lui/outcome_measures", "src_encoding": "UTF-8", "text": "import os\nimport pandas as pd\nfrom datetime import datetime\n\ndef clean_data(import_file_location, dropna=True):\n file_extension = os.path.basename(import_file_location).split('.')[1]\n\n if file_extension == 'csv':\n dataframe = pd.read_csv(import_file_location)\n else:\n dataframe = pd.read_excel(import_file_location)\n\n dataframe.drop(labels=[\n 'Respondent ID', 'Collector ID', 'End Date', \n 'IP Address', 'Email Address', 'First Name', 'Last Name', 'Custom Data 1', \n 'Program'], axis=1, inplace=True)\n dataframe.drop(0, inplace=True)\n dataframe.iloc[:,3] = pd.to_datetime(dataframe.iloc[:,0]).dt.date\n dataframe.drop('Start Date', axis=1, inplace=True)\n \n new_cols = [\n 'first_name', 'last_name', 'assess_date', 'cottage', \n 'ders_1', 'ders_2', 'ders_3','ders_4','ders_5','ders_6','ders_7','ders_8','ders_9','ders_10','ders_11','ders_12','ders_13','ders_14','ders_15','ders_16',\n 'ari_1', 'ari_2', 'ari_3', 'ari_4', 'ari_5', 'ari_6', 'ari_7',\n 'dts_1', 'dts_2', 'dts_3', 'dts_4', 'dts_5', 'dts_6', 'dts_7', 'dts_8', 'dts_9', 'dts_10', 'dts_11', 'dts_12', 'dts_13', 'dts_14', 'dts_15',\n 'ceas_self_1', 'ceas_self_2', 'ceas_self_3', 'ceas_self_4', 'ceas_self_5', 'ceas_self_6', 'ceas_self_7', 'ceas_self_8', 'ceas_self_9', 'ceas_self_10', 'ceas_self_11', 'ceas_self_12', 'ceas_self_13',\n 'ceas_from_1', 'ceas_from_2', 'ceas_from_3', 'ceas_from_4', 'ceas_from_5', 'ceas_from_6', 'ceas_from_7', 'ceas_from_8', 'ceas_from_9', 'ceas_from_10', 'ceas_from_11', 'ceas_from_12', 'ceas_from_13',\n 'ceas_to_1', 'ceas_to_2', 'ceas_to_3', 'ceas_to_4', 'ceas_to_5', 'ceas_to_6', 'ceas_to_7', 'ceas_to_8', 'ceas_to_9', 'ceas_to_10', 'ceas_to_11', 'ceas_to_12', 'ceas_to_13',\n 'camm_1', 'camm_2', 'camm_3', 'camm_4', 'camm_5', 'camm_6', 'camm_7', 'camm_8', 'camm_9', 'camm_10'\n ]\n \n dataframe.columns = new_cols\n dataframe.insert(loc=1, column='name', value=dataframe['last_name'].str.strip(' ') + ',' + dataframe['first_name'].str.strip(' '))\n dataframe.drop(['first_name', 'last_name', 'cottage'], axis=1, inplace=True)\n\n if dropna:\n dataframe.dropna(how='any', inplace=True)\n\n dataframe['assess_date'] = pd.to_datetime(dataframe['assess_date'])\n dataframe.loc[:, \"ders_1\":] = dataframe.loc[:, \"ders_1\":].astype('float64')\n dataframe.reset_index(drop=True, inplace=True)\n dataframe.sort_values(by=[\"name\", \"assess_date\"], inplace=True)\n \n return dataframe\n\ndef clean_avatar_report(avatar_report_path):\n file_extension = os.path.basename(avatar_report_path).split('.')[1]\n if file_extension == 'csv':\n dataframe = pd.read_csv(avatar_report_path)\n else:\n dataframe = pd.read_excel(avatar_report_path)\n # Import and initial cleaning\n dataframe.columns = dataframe.iloc[5] # Resetting the columns headers to their correct values\n dataframe.drop([0,1,2,3,4,5], inplace=True) # Getting rid of blank rows put in by Crystal Report formatting\n dataframe.dropna(axis=1, how=\"all\", inplace=True)\n \n # Selecting only clients in residential/PHP program. 
\n dataframe = dataframe.loc[(dataframe['Program'] == 'Residential Program') | (dataframe['Program'] == 'PHP + Room and Board Program')]\n \n dataframe.sort_values(by='Adm Date', ascending=False, inplace=True)\n dataframe = dataframe[[\"Client Name\", \"PID\",\"Adm Date\", \"Disc. Date\", \"EP#\", \"Program\"]]\n dataframe[\"Adm Date\"] = pd.to_datetime(dataframe[\"Adm Date\"].dt.date)\n dataframe[\"Disc. Date\"].fillna(value=datetime.today(), inplace=True)\n dataframe[\"Disc. Date\"] = pd.to_datetime(dataframe[\"Disc. Date\"].dt.date)\n dataframe.columns = [\"name\", \"pid\", \"adm_date\", \"disc_date\", \"epn\", \"program\"]\n\n return dataframe\n\ndef score_ders(dataframe):\n '''\n SCORING METHODOLOGY:\n The DERS has 5 recognized subscales:\n - Clarity - lack of emotional clarity\n QUESTIONS(1,2)\n - Goals - difficulties engaging in goal-directed behavior \n QUESTIONS(3,7,15) \n - Impulse - impulse control difficulties\n QUESTIONS(4,8,11)\n - Strategies - limited access to emotion regulatioin strategies\n QUESTIONS(5,6,12,14,16)\n - Non-acceptance - non-acceptance of emotional responses\n QUESTIONS(9,10,13)\n \n SCORING DETAILS:\n Range of scores vary on subscales\n Range of possible OVERALL scores: 16-80\n Good score = LOWER\n Bad score = HIGHER\n '''\n clarity = [\"ders_1\", \"ders_2\"] \n goals = [\"ders_3\", \"ders_7\", \"ders_15\"]\n impulse = [\"ders_4\", \"ders_8\", \"ders_11\"]\n strategies = [\"ders_5\", \"ders_6\", \"ders_12\", \"ders_14\", \"ders_16\"]\n non_acceptance = [\"ders_9\", \"ders_10\", \"ders_13\"]\n\n columns = [col for col in dataframe.columns if \"ders\" in col]\n \n overall = pd.Series(data=dataframe.loc[:, columns].sum(axis='columns'), name='ders_overall')\n clarity_score = pd.Series(data=dataframe.loc[:, clarity].sum(axis='columns'), name='ders_clarity')\n goals_score = pd.Series(data=dataframe.loc[:, goals].sum(axis='columns'), name='ders_goals')\n impulse_score = pd.Series(data=dataframe.loc[:, impulse].sum(axis='columns'), name='ders_impulse')\n strategies_score = pd.Series(data=dataframe.loc[:, strategies].sum(axis='columns'), name='ders_strategies')\n non_acceptance_score = pd.Series(data=dataframe.loc[:, non_acceptance].sum(axis='columns'), name='ders_nonacceptance')\n \n return (overall, clarity_score, goals_score, impulse_score, strategies_score, non_acceptance_score)\n\ndef score_ari(dataframe):\n '''\n SCORING METHODOLOGY:\n ARI score is the sum of the first 6 items. 
The final question \"Overall irritability\" is not scored.\n \n SCORING DETAILS:\n Range of possible scores: 0-12\n Good score = LOWER\n Bad score = HIGHER\n '''\n \n columns = [col for col in dataframe.columns if \"ari\" in col]\n \n return pd.Series(data=dataframe.loc[:, columns].iloc[:, :6].sum(axis='columns'), name='ari')\n\ndef score_dts(dataframe):\n '''\n SCORING METHODOLOGY:\n The DTS has 4 recognized subscales:\n - Tolerance - ability to tolerate emotions \n QUESTIONS(1,3,5)\n - Appraisal - assessment of the emotional situation as acceptable \n QUESTIONS(6*,7,9,10,11,12) \n - Absorption - level of attention absorbed by the negative emotion and relevant interference with functioning \n QUESTIONS(2,4,15)\n - Regulation - ability to regulate emotion \n QUESTIONS(8,13,14)\n Scores from each subscale are valid and can be calculated by taking the average of each question in the subscale\n The overall DTS score is calculated by taking the average of all the subscale scores.\n \n SCORING DETAILS:\n Range of all possible scores: \n 1-5, as a floating-point value\n Good score = HIGHER\n Bad score = LOWER\n \n * Question 6 is REVERSE scored.\n '''\n \n tolerance = ['dts_1', 'dts_3', 'dts_5']\n appraisal = ['dts_6', 'dts_7', 'dts_9', 'dts_10', 'dts_11', 'dts_12']\n absorption = ['dts_2', 'dts_4', 'dts_15']\n regulation = ['dts_8', 'dts_13', 'dts_14']\n\n # REVERSE SCORING QUESTION 6\n dataframe[\"dts_6\"].replace({1:5, 2:4, 4:2, 5:1}, inplace=True)\n\n tolerance_score = pd.Series(data=dataframe.loc[:, tolerance].mean(axis='columns'), name=\"dts_tolerance\")\n appraisal_score = pd.Series(data=dataframe.loc[:, appraisal].mean(axis='columns'), name=\"dts_appraisal\")\n absorption_score = pd.Series(data=dataframe.loc[:, absorption].mean(axis='columns'), name=\"dts_absorption\")\n regulation_score = pd.Series(data=dataframe.loc[:, regulation].mean(axis='columns'), name=\"dts_regulation\")\n overall_score = pd.Series(data=(tolerance_score + appraisal_score + absorption_score + regulation_score) / 4, name=\"dts_overall\")\n \n return (overall_score, tolerance_score, appraisal_score, absorption_score, regulation_score)\n\ndef score_ceas(dataframe):\n '''\n SCORING METHODOLOGY:\n Within each component of the CEAS (Self-Compassion, Compassion TOWARDS others, Compassion FROM others), there are two separate domains:\n - Engagement QUESTIONS(1,2,4,5,6,8)\n - Action QUESTIONS(9,10,12,13)\n These two domains are scored separately (QUESTIONS 3, 7, and 11 are not included in scoring) and the component scores are derived from \n the sum of the respective Engagement & Action scales. 
\n \n SCORING DETAILS:\n Range of possible scores: \n Engagement = 6-60 \n Action = 4-40\n Component-level = 10-100 \n '''\n\n ceas = dataframe.loc[:, [col for col in dataframe.columns if \"ceas_\" in col]]\n \n cols_to_drop = ['ceas_self_3', 'ceas_self_7', 'ceas_self_11',\n 'ceas_to_3', 'ceas_to_7', 'ceas_to_11', \n 'ceas_from_3', 'ceas_from_7', 'ceas_from_11']\n ceas.drop(labels=cols_to_drop, axis='columns', inplace=True)\n \n self_cols = [col for col in ceas.columns if \"self\" in col]\n to_cols = [col for col in ceas.columns if \"to\" in col]\n from_cols = [col for col in ceas.columns if \"from\" in col]\n \n ceas_self = pd.Series(data=ceas.loc[:, self_cols].sum(axis=1), name='ceas_self')\n ceas_to = pd.Series(data=ceas.loc[:, to_cols].sum(axis=1), name='ceas_to')\n ceas_from = pd.Series(data=ceas.loc[:, from_cols].sum(axis=1), name='ceas_from')\n \n return (ceas_self, ceas_to, ceas_from)\n\ndef score_camm(dataframe):\n '''\n SCORING METHODOLGY:\n CAMM score is simply the sum of all questions* on the scale. \n \n SCORING DETAILS:\n Range of possible scores: 0-40\n \n * All questions on the CAMM are reverse scored\n '''\n columns = [col for col in dataframe.columns if \"camm\" in col]\n \n return pd.Series(data=dataframe.loc[:, columns].sum(axis='columns'), name='camm')\n\ndef generate_scores(datasource):\n # Cleaning dataset to enable proper scoring in various functions\n if isinstance(datasource, str):\n dataframe = clean_data(datasource)\n elif isinstance(datasource, pd.DataFrame):\n dataframe = datasource\n else:\n print(\"Could not generate scores. Datasource was not directory or DataFrame object\")\n return None\n \n # Calculating scores and returning each as a Series object\n ders_overall, clarity, goals, impulse, strategies, non_acceptance = score_ders(dataframe)\n ari_series = score_ari(dataframe)\n ceas_self, ceas_to, ceas_from = score_ceas(dataframe)\n dts_overall, tolerance, appraisal, absorption, regulation = score_dts(dataframe)\n camm_series = score_camm(dataframe)\n \n # Building scored DataFrame\n dataframe = pd.concat(\n [dataframe.loc[:,:\"assess_date\"],\n ders_overall, clarity, goals, impulse, strategies, non_acceptance, \n ari_series, \n dts_overall, tolerance, appraisal, absorption, regulation, \n ceas_self, ceas_to, ceas_from, \n camm_series], \n axis=1)\n \n dataframe.sort_values(by=[\"name\", \"assess_date\"], inplace=True)\n\n return dataframe\n\n# TODO add if __name__ == \"__main__\": segment to trigger function cascade to complete import prep process" }, { "alpha_fraction": 0.8206896781921387, "alphanum_fraction": 0.8206896781921387, "avg_line_length": 71.5, "blob_id": "3e5144ecb520f1f6395eae2ff4e0c1b00b6bc2b1", "content_id": "249b17867c9979c0b2c22006e689ad32b491eb9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 145, "license_type": "no_license", "max_line_length": 125, "num_lines": 2, "path": "/README.md", "repo_name": "mat7lui/outcome_measures", "src_encoding": "UTF-8", "text": "# outcome_measures\nThis repository contains all the data cleaning, import prep, and various other script files for the Outcome Measures project.\n" }, { "alpha_fraction": 0.5217582583427429, "alphanum_fraction": 0.58681321144104, "avg_line_length": 58.09090805053711, "blob_id": "209eee1091a33d956dea3393679788afcd44bb8c", "content_id": "641bea446b8ac7467b40da1f3f949c1e220c842e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4550, "license_type": "no_license", 
"max_line_length": 206, "num_lines": 77, "path": "/data_clean.py", "repo_name": "mat7lui/outcome_measures", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport os\nimport sys\nfrom datetime import datetime\n\ndef clean_return(import_file_location):\n # Path to raw input data from Survey Monkey\n data = pd.read_excel(import_file_location)\n\n data.drop(labels=[\n 'Respondent ID', 'Collector ID', 'Start Date', 'End Date', \n 'IP Address', 'Email Address', 'First Name', 'Last Name', 'Custom Data 1', \n 'Program'], axis=1, inplace=True)\n data.drop(0, inplace=True)\n\n new_cols = [\n 'first_name', 'last_name', 'assess_date', 'cottage', \n 'ders16_1', 'ders16_2', 'ders16_3','ders16_4','ders16_5','ders16_6','ders16_7','ders16_8','ders16_9','ders16_10','ders16_11','ders16_12','ders16_13','ders16_14','ders16_15','ders16_16',\n 'ari_1', 'ari_2', 'ari_3', 'ari_4', 'ari_5', 'ari_6', 'ari_7',\n 'comp_self_1', 'comp_self_2', 'comp_self_3', 'comp_self_4', 'comp_self_5', 'comp_self_6', 'comp_self_7', 'comp_self_8', 'comp_self_9', 'comp_self_10', 'comp_self_11', 'comp_self_12', 'comp_self_13',\n 'comp_from_1', 'comp_from_2', 'comp_from_3', 'comp_from_4', 'comp_from_5', 'comp_from_6', 'comp_from_7', 'comp_from_8', 'comp_from_9', 'comp_from_10', 'comp_from_11', 'comp_from_12', 'comp_from_13',\n 'comp_to_1', 'comp_to_2', 'comp_to_3', 'comp_to_4', 'comp_to_5', 'comp_to_6', 'comp_to_7', 'comp_to_8', 'comp_to_9', 'comp_to_10', 'comp_to_11', 'comp_to_12', 'comp_to_13',\n 'dts_1', 'dts_2', 'dts_3', 'dts_4', 'dts_5', 'dts_6', 'dts_7', 'dts_8', 'dts_9', 'dts_10', 'dts_11', 'dts_12', 'dts_13', 'dts_14', 'dts_15',\n 'camm_1', 'camm_2', 'camm_3', 'camm_4', 'camm_5', 'camm_6', 'camm_7', 'camm_8', 'camm_9', 'camm_10'\n ]\n data.columns = new_cols\n\n data['assess_date'] = pd.to_datetime(data['assess_date'])\n return data\n\ndef clean_and_export(import_file_location, export_file_location, open_file=False):\n assert os.path.exists(import_file_location), 'Path to data not found'\n\n # Default destination for the cleaned CSV file if not export_file_location is provided\n default_output_path = r'c:\\Users\\mlui-tankersley\\Outcome_Measures\\cleaned_data\\\\' + str(datetime.today().strftime('%m.%d.%Y')) + '.csv'\n\n # Path to raw input data from Survey Monkey\n data = pd.read_excel(import_file_location)\n\n data.drop(labels=[\n 'Respondent ID', 'Collector ID', 'Start Date', 'End Date', \n 'IP Address', 'Email Address', 'First Name', 'Last Name', 'Custom Data 1', \n 'Program'], axis=1, inplace=True)\n data.drop(0, inplace=True)\n\n new_cols = [\n 'first_name', 'last_name', 'assess_date', 'cottage', \n 'ders16_1', 'ders16_2', 'ders16_3','ders16_4','ders16_5','ders16_6','ders16_7','ders16_8','ders16_9','ders16_10','ders16_11','ders16_12','ders16_13','ders16_14','ders16_15','ders16_16',\n 'ari_1', 'ari_2', 'ari_3', 'ari_4', 'ari_5', 'ari_6', 'ari_7',\n 'comp_self_1', 'comp_self_2', 'comp_self_3', 'comp_self_4', 'comp_self_5', 'comp_self_6', 'comp_self_7', 'comp_self_8', 'comp_self_9', 'comp_self_10', 'comp_self_11', 'comp_self_12', 'comp_self_13',\n 'comp_from_1', 'comp_from_2', 'comp_from_3', 'comp_from_4', 'comp_from_5', 'comp_from_6', 'comp_from_7', 'comp_from_8', 'comp_from_9', 'comp_from_10', 'comp_from_11', 'comp_from_12', 'comp_from_13',\n 'comp_to_1', 'comp_to_2', 'comp_to_3', 'comp_to_4', 'comp_to_5', 'comp_to_6', 'comp_to_7', 'comp_to_8', 'comp_to_9', 'comp_to_10', 'comp_to_11', 'comp_to_12', 'comp_to_13',\n 'dts_1', 'dts_2', 'dts_3', 'dts_4', 'dts_5', 'dts_6', 'dts_7', 'dts_8', 'dts_9', 'dts_10', 'dts_11', 
'dts_12', 'dts_13', 'dts_14', 'dts_15',\n 'camm_1', 'camm_2', 'camm_3', 'camm_4', 'camm_5', 'camm_6', 'camm_7', 'camm_8', 'camm_9', 'camm_10'\n ]\n\n data.columns = new_cols\n\n data['assess_date'] = pd.to_datetime(data['assess_date'])\n\n if export_file_location == \"\":\n data.to_csv(default_output_path, index=False)\n print(f\"\\nProgram successfully completed.\\nFormatted file was exported to {default_output_path}\")\n if open_file == True:\n print('Opening file now...Thank you!')\n os.startfile(default_output_path)\n else:\n print(\"Process completed. Thank you!\")\n\n else:\n data.to_csv(export_file_location, index=False)\n print(f\"Program successfully completed.\\nFormatted file was exported to {export_file_location}.\")\n if open_file == True:\n print('Opening file now...Thank you!')\n os.startfile(export_file_location)\n else:\n print(\"Process completed. Thank you!\")\n" }, { "alpha_fraction": 0.6943238973617554, "alphanum_fraction": 0.7082104086875916, "avg_line_length": 57.191917419433594, "blob_id": "567e03ac6288dde9cb5ed6d1fc670a1d2c7734ff", "content_id": "20fa66d9f6dfa3d31f32a085a81d258e816a4487", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5761, "license_type": "no_license", "max_line_length": 215, "num_lines": 99, "path": "/import_prep.py", "repo_name": "mat7lui/outcome_measures", "src_encoding": "UTF-8", "text": "'''\n### PROGRAM PURPOSE: ####\nThis program is intended to take a raw file, exported from Survey Monkey which contains survey data \nfrom the Outcome Measures battery of assessments, pairs it with matched identification information from\nthe Avatar \"Admissions in Date Range\" report, and exports 6 separate CSVs.\n\n#### OUTPUT TYPES: ####\nAsssessment-level import file(s): A file structured to easily import into Avatar, Hillside's EHR database.\nThis program creates 5 assessment-level files and 1 file for any non-matched names. The 5 assessment-level files are:\n - Difficulties in Emotion Regulation Scale, 16-item version (DERS-16)\n - Affective Reactivity Index (ARI)\n - Distress Tolerance Scale (DTS)\n - Compassionate Engagement and Action Scales (CEAS)\n - Child and Adolescent Mindfulness Measure (CAMM)\nThe sixth file contains all rows that did not find a match during processing. This is most likely due to names being \nspelled incorrectly at the time of entry into Survey Monkey. These files will have to be manually reviewed and appropriately\nmatched with a client ID number and the correct episode number for the assessment(s) in question. \n\n#### REQUIRED INFORMATION: ####\nThe program requires input from the user specifying the location of the raw data file which would have been \ndownloaded prior to runtime as well as a copy of an 'Admissions by Date Range' report from AVATAR. \n'''\nfrom measure_tools import clean_data, clean_avatar_report\nimport pandas as pd\nimport os\nimport platform\nfrom datetime import datetime\nimport sys\n\n# Setting the destination for final files based on current computer operating system\nif platform.system() == \"Windows\":\n output_path = r\"U:/Outcome_Measures/Import_ready_files/\"\n print(f\"\\nYOU ARE RUNNING THIS FILE ON WINDOWS. EXPORT LOCATION WILL BE: {output_path}\")\nelse:\n output_path = r\"/Users/mattlui/Desktop/outcome_measures/data_dump/\"\n print(f\"\\nYOU ARE RUNNING THIS FILE ON MacOS. 
EXPORT LOCATION WILL BE: {output_path}\")\n\n# DIRECTORY SPECIFICATION & BATCH IDENTIFICATION\nbatch_id = str(input(\"Please enter current batch number: \"))\nraw_file = os.path.join(r\"C:\\Users\\mlui-tankersley\\Downloads\", \"batch_\" + batch_id, \"Excel\",\"Outcome Measures.xlsx\")\navatar_report_path = os.path.join(r\"U:\\Outcome_Measures\\avatar_admissions_reports\", \"batch_\" + batch_id + \".xls\")\n\n# BACKUP DIRECTORY SPECIFICATION IN CASE SURVEY MONKEY DOES NOT INCLUDE AN 'EXCEL' FOLDER AFTER UN-ZIPPING DATA FILE\nif os.path.exists(raw_file) == False:\n raw_file = os.path.join(r\"C:\\Users\\mlui-tankersley\\Downloads\", \"batch_\" + batch_id,\"Outcome Measures.xlsx\")\n\n# DIRECTORY VALIDATION\nwhile os.path.exists(raw_file) == False:\n raw_file = input(\"Path not found. Please enter path to raw data or enter 'q' to exit program:\\n\")\n if raw_file.lower() == 'q':\n sys.exit()\n\nwhile os.path.exists(avatar_report_path) == False:\n avatar_report_path = input(\"Path not found. Please enter path to Avatar Admissions report or enter 'q' to exit program:\\n\")\n if avatar_report_path.lower() == 'q':\n sys.exit()\n\n# DATA CLEANING\ndf = clean_data(raw_file)\navatar_df = clean_avatar_report(avatar_report_path)\ndf = df.loc[df['name'].str.lower().sort_values().index] # Case insensitive sorting in-place\n\n# Merging names with Avatar IDs and EPNs\nmerged = df.merge(avatar_df, how=\"left\", on=\"name\")\nmissing = merged.loc[merged[\"pid\"].isnull()]\nmatched = merged[(merged[\"adm_date\"] <= merged[\"assess_date\"]) & (merged[\"assess_date\"] <= merged[\"disc_date\"])]\n\n# Combining the matches with the non-matches for hand review\ncombined = pd.concat([missing, matched])\n\ncombined[\"ders_assessment_type\"] = '15'\ncombined[\"ders_draft_final\"] = 'D'\ncombined[\"ari_total\"] = combined.loc[:,[col for col in combined.columns if \"ari\" in col]].astype(\"float64\").sum(axis=1)\ncombined[\"dts_status\"] = 'D'\n\n# Reorganizing dataframe into better column sequence & dropping CEAS columns from further analysis\ncombined = combined[[\n 'name', 'pid', 'entered_id', 'epn', 'entered_epn', 'adm_date', 'assess_date', 'disc_date',\n 'ders_1', 'ders_2', 'ders_3', 'ders_4', 'ders_5', 'ders_6', 'ders_7', 'ders_8', 'ders_9', 'ders_10', 'ders_11', 'ders_12', 'ders_13', 'ders_14', 'ders_15', 'ders_16', 'ders_assessment_type', 'ders_draft_final', \n 'ari_1', 'ari_2', 'ari_3', 'ari_4', 'ari_5', 'ari_6', 'ari_7', 'ari_total', \n 'dts_1', 'dts_2', 'dts_3', 'dts_4', 'dts_5', 'dts_6', 'dts_7', 'dts_8', 'dts_9', 'dts_10', 'dts_11', 'dts_12', 'dts_13', 'dts_14', 'dts_15', 'dts_status',\n 'camm_1', 'camm_2', 'camm_3', 'camm_4', 'camm_5', 'camm_6', 'camm_7', 'camm_8', 'camm_9', 'camm_10']\n]\n\ncombined[[\"pid\", \"entered_id\"]] = combined.loc[:, [\"pid\", \"entered_id\"]].astype(\"float64\") # Standardizing typing for numerical columns to enable boolean comparisons for matches\ncombined.insert(loc=0, column='id_matched', value=combined['pid'] == combined['entered_id']) # Avatar ID from algorithm matching and staff-entered values comparison\ncombined.insert(loc=0, column='epn_matched', value=combined['epn'] == combined['entered_epn']) # Avatar EPN from algorithm matching and staff-entered values comparison \ncombined.insert(loc=0, column=\"matched_all\", value=(combined[\"id_matched\"] & combined[\"epn_matched\"]))\ncombined.sort_values(by=['matched_all', 'id_matched','name'], inplace=True)\ncombined.fillna(value=\"Missing\", inplace=True)\n\noutput_path = output_path + \"batch_\" + batch_id + \"-\" + 
str(datetime.today().strftime('%m.%d.%Y')) + '.csv'\ncombined.to_csv(path_or_buf=output_path, index=False)\n\nif platform.system() == \"Windows\":\n print('\\nOpening window to exported files...dot..dot..dot..')\n os.startfile(r'U:/Outcome_Measures/Import_ready_files')\nelse:\n print(f\"Processing completed. Output files dropped in {output_path}\")\n" }, { "alpha_fraction": 0.6192810535430908, "alphanum_fraction": 0.6234827041625977, "avg_line_length": 45.576087951660156, "blob_id": "14097057611e226e42ebd485c512d436bb9f1c02", "content_id": "8746d87255f741e6890980ab7dd13dacca59fa7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4284, "license_type": "no_license", "max_line_length": 181, "num_lines": 92, "path": "/xml_tools.py", "repo_name": "mat7lui/outcome_measures", "src_encoding": "UTF-8", "text": "from numpy.lib.function_base import copy\nimport pandas as pd\nfrom xml.etree import ElementTree as ET\nfrom xml.dom.minidom import parseString\nfrom collections import namedtuple\n\n# TODO \n# Think about whether it's logical to create a dictionary to convert df column names into the XML tags\n# If so, does each dictionary live within a separate inherited sub-class or can it be bound to the parent class?\n# Check placement of patient_id and episode_num variables. They are repeated within the XML file so they should probably live in a sub-class\n\nclass Batch:\n mode = None\n option = ET.Element(\"option\")\n opt_id = ET.SubElement(option, \"optionidentifier\")\n client_data = ET.SubElement(option, \"optiondata\")\n system_tags = {\n 'ders':'SYSTEM.DERS_16',\n 'ari':'SYSTEM.ARI',\n 'dts':'SYSTEM.distress_tolerance',\n 'camm':'SYSTEM.camm'\n }\n\n def __init__(self, assessment_type=None):\n '''\n Initializing the Batch object requires only that the type of assessment data being passed be specified as a string.\n Valid assessment types inputs include: \n - \"ders\": Difficulty in Emotion Regulation Scale\n - \"ari\": Affective Reactivity Index\n - \"dts\": Distress Tolerance Scale\n - \"camm\": Child and Adolescent Mindfulness Measure\n ''' \n if assessment_type.lower() == \"ders\":\n self.mode = assessment_type\n self.opt_id.text = \"USER119\"\n print(f\"Batch initialized in: {self.mode.upper()} mode\")\n elif assessment_type.lower() == \"ari\":\n self.mode = assessment_type\n self.opt_id.text = \"USER124\"\n print(f\"Batch initialized in: {self.mode.upper()} mode\")\n elif assessment_type.lower() == \"dts\":\n self.mode = assessment_type\n self.opt_id.text = \"USER130\"\n print(f\"Batch initialized in: {self.mode.upper()} mode\")\n elif assessment_type.lower() == \"camm\":\n self.mode = assessment_type\n self.opt_id.text = \"USER129\"\n print(f\"Batch initialized in: {self.mode.upper()} mode\")\n else:\n self.mode = assessment_type.lower()\n print(\"You did something wrong, @$$hole.\\nSpecify assessment type as one of the following:\\n\\t['ders', 'ari', 'dts', 'camm']\")\n\n def __str__(self):\n '''\n Represents the print output of a Batch object as a neatly formatted XML file.\n Or, if you screw it up, it calls you an @$$hole.\n '''\n if self.mode == None:\n return \"\\nHey everyone, check out this @$$hole. 
\\nBatch wasn't created, go back and specify your assessment type again.\"\n else:\n document = ET.tostring(self.option, encoding='utf-8')\n formatted_xml = parseString(document)\n return formatted_xml.toprettyxml(indent=\"\\t\")\n \n def write(self, output_path):\n document = ET.ElementTree(self.option)\n document.write(file_or_filename=output_path, encoding=\"utf-8\", xml_declaration=True)\n \n def add_data(self, data=None):\n # This function will be used to parse a namedtuple from a cleaned dataframe containing client data\n if data is None:\n print(\"No data provided\")\n else: \n try:\n patient_id = ET.SubElement(self.client_data, \"PATID\")\n patient_id.text = data\n episode_num = ET.SubElement(self.client_data, \"EPISODE_NUMBER\")\n episode_num.text = data\n system_tag = ET.SubElement(self.client_data, self.system_tags[self.mode]) # Fix this so that it recognizes and adjusts value to match the \"mode\" of the parent class\n system_tag.text = data\n except KeyError:\n print(f\"DataBlock was initialized with an invalid assessment_type. Valid types include: ['ders', 'ari', 'dts', 'camm']. \\nCurrent assessment_type = {self.mode}\")\n \ndf = pd.read_csv(\"./data_files/xml_dataset.csv\")\n\n# Try again to see if it's possible to access the column headers with df.itertuples()\nfor index, item in df.iterrows():\n print(item['ders_1']) # prints just the ders_1 for each client.\n\n# x = Batch(assessment_type=\"ders\")\n# x.add_data()\n# x.write(\"dummy_output.xml\")" } ]
6
xjsxujingsong/CDTM-Deep-Learning-Drones
https://github.com/xjsxujingsong/CDTM-Deep-Learning-Drones
47cbe486ca6fc9ef2711ada39be0a906c7d8d107
dabb7ad064944c4936cec73e687b3d25ec632733
a5e39ba88034391d358bbdca188d888e7abcee22
refs/heads/master
2021-06-18T16:27:20.192462
2017-06-24T10:12:12
2017-06-24T10:12:12
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6162142753601074, "alphanum_fraction": 0.6365485191345215, "avg_line_length": 23.535791397094727, "blob_id": "d088868b3845b47bacf43cc951e70f6a12f2c42e", "content_id": "77ffd2c57298ef2ee75b8bf536576f753519fc19", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11311, "license_type": "permissive", "max_line_length": 184, "num_lines": 461, "path": "/EAGLE-EYE/eagle-eye.py", "repo_name": "xjsxujingsong/CDTM-Deep-Learning-Drones", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n\"\"\"Eagle_Eye app for the AR.Drone.\n\nThis application allows to control the AR.Drone via a PC keyboard while seeing the camera stream.\nIt also features an autonomous mode in which the drone follows an \"aruco\"-marker without active control by a pilot.\n\"\"\"\n\n\nimport cv2\nimport cv2.aruco as aruco\nimport libardrone.libardrone as libardrone\nimport numpy as np\n\nimport threading\nimport time\n\nexitFlag = 0\n\n# thread to control the drone while in manual control mode. Basic control stays active in\n# autonomous mode to still allow interception if required\nclass manualControlThread (threading.Thread):\n\tdef __init__(self, threadID, name):\n\t\tthreading.Thread.__init__(self)\n\t\tself.threadID = threadID\n\t\tself.name = name\n\t\tself.running = True\n\t\t\n\tdef run(self):\n\t\t\n\t\tglobal drone\n\t\tglobal exiting\n\t\tglobal manual_mode\n\t\tglobal thread_lock\n\t\tglobal thread_lock_manual_mode\n\t\tglobal thread_lock_key\n\t\tglobal key\n\t\t\n\t\tprint \"Starting \" + self.name\n\t\t\n\t\twhile self.running:\n\t\t\t# query pressed keys\n\t\t\tthread_lock_key.acquire()\n\t\t\tk = key\n\t\t\tthread_lock_key.release()\n\t\t\t# escape to stop program execution\n\t\t\tif k == 27: # 27=escape\n\t\t\t\tself.running = False\n\t\t\t\tthread_lock.acquire()\n\t\t\t\texiting = True\n\t\t\t\tthread_lock.release()\n\t\t\t\tdrone.reset()\n\t\t\t # takeoff\n\t\t\telif k == 13: # 13=enter\n\t\t\t\tprint(\"return\")\n\t\t\t\tdrone.takeoff()\n\t\t\t# land\n\t\t\telif k == 32: # 32=space\n\t\t\t\tprint(\"space\")\n\t\t\t\tdrone.land()\n\t\t\t# emergency\n\t\t\telif k == 8: # 8=backspace\n\t\t\t\tdrone.reset()\n\t\t\t# switch control mode\n\t\t\telif k == ord('m'):\n\t\t\t\tdrone.hover()\n\t\t\t\tthread_lock_manual_mode.acquire()\n\t\t\t\tmanual_mode = not manual_mode\n\t\t\t\tthread_lock_manual_mode.release()\n\t \t\t # switch between manual and autonomous control\n\t\t\telif manual_mode:\n\t\t\t\t# listen for additional key events for manual control\n\t\t\t\t# forward / backward\n\t\t\t\tif k == ord('w'):\n\t\t\t\t\tdrone.move_forward()\n\t\t\t\telif k == ord('s'):\n\t\t\t\t\tdrone.move_backward()\n\t\t\t\t# left / right\n\t\t\t\telif k == ord('a'):\n\t\t\t\t\tdrone.move_left()\n\t\t\t\telif k == ord('d'):\n\t\t\t\t\tdrone.move_right()\n\t\t\t\t# up / down\n\t\t\t\telif k == 2490368:\n\t\t\t\t\tdrone.move_up()\n\t\t\t\telif k == 2621440:\n\t\t\t\t\tdrone.move_down()\n\t\t\t\t# turn left / turn right\n\t\t\t\telif k == 2424832:\n\t\t\t\t\tdrone.turn_left()\n\t\t\t\telif k == 2555904:\n\t\t\t\t\tdrone.turn_right()\n\t\t\t\t# speed\n\t\t\t\telif k == ord('1'):\n\t\t\t\t\tdrone.speed = 0.1\n\t\t\t\telif k == ord('2'):\n\t\t\t\t\tdrone.speed = 0.2\n\t\t\t\telif k == ord('3'):\n\t\t\t\t\tdrone.speed = 0.3\n\t\t\t\telif k == ord('4'):\n\t\t\t\t\tdrone.speed = 0.4\n\t\t\t\telif k == ord('5'):\n\t\t\t\t\tdrone.speed = 0.5\n\t\t\t\telif k == ord('6'):\n\t\t\t\t\tdrone.speed = 0.6\n\t\t\t\telif k == ord('7'):\n\t\t\t\t\tdrone.speed = 
0.7\n\t\t\t\telif k == ord('8'):\n\t\t\t\t\tdrone.speed = 0.8\n\t\t\t\telif k == ord('9'):\n\t\t\t\t drone.speed = 0.9\n\t\t\t\telif k == ord('0'):\n\t\t\t\t drone.speed = 1.0\n\t\t\t\t# if no matching input: hover\n\t\t\t\telse:\n\t\t\t\t drone.hover()\n\t\t\t \n\t\tprint(\"Shutting down...\")\n\t\tdrone.halt()\n\t\tprint(\"Ok.\")\t\t\n\t\tprint \"Exiting \" + self.name\n\n# thread that performs the automatic control of the drone while not in manual mode\nclass automaticControlThread (threading.Thread):\n\tdef __init__(self, threadID, name):\n\t\tthreading.Thread.__init__(self)\n\t\tself.threadID = threadID\n\t\tself.name = name\n\t\tself.running = True\n\t\tself.status = \"Start\"\n\t\t\n\t\t\n\tdef run(self):\n\t\t\n\t\tglobal drone\n\t\tglobal thread_lock\n\t\tglobal thread_lock_manual_mode\n\t\tglobal thread_lock_camera_frame\n\t\tglobal exiting\n\t\tglobal manual_mode\n\t\tglobal W\n\t\tglobal H\n\t\tglobal new_frame\n\t\tglobal render_frame\n\t\tprint \"Starting \" + self.name\n\t\t\n\t\t# initializations for startup\n\t\tmiss_counter = 0\n\t\tp = (-W\t,0)\t\n\t\t\n\t\t# loop while flight active\n\t\twhile self.running:\n\t\t\tthread_lock.acquire()\n\t\t\tif exiting:\n\t\t\t\tthread_lock.release()\n\t\t\t\tself.running = False\n\t\t\telse:\n\t\t\t\tthread_lock.release()\n\t\t\t\t\n\t\t\t\tthread_lock_manual_mode.acquire()\n\t\t\t\t# check if in autonomous mode\n\t\t\t\tif not manual_mode:\n\t\t\t\t\tthread_lock_manual_mode.release()\n\n\t\t\t\t\t# Tracking \n\t\t\t\t\tif self.status == \"Start\":\n\t\t\t\t\t\t# To do with image recognition\n\t\t\t\t\t\tself.status = \"Searching\"\n\t\t\t\t\telif self.status == \"Searching\":\n\t\t\t\t\t\t# To do turn and search marker\n\t\t\t\t\t\tp,area = self.getAndSearchImage()\n\t\t\t\t\t\tif p[0] >= -W/2:\n\t\t\t\t\t\t\tself.status = \"Tracking\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tdrone.hover()\n\t\t\t\t\t\tprint 'Searching'\n\t\t\t\t\telif self.status == \"Tracking\":\n\t\t\t\t\t\t# While in tracking mode, allow 4 misses(frames without marker detection) before going back to hovering\n\t\t\t\t\t\tif p[0] >= -W/2:\n\t\t\t\t\t\t\tmiss_counter = 0\n\t\t\t\t\t\t\t# call controller\n\t\t\t\t\t\t\tself.controlStep(p,area)\n\t\t\t\t\t\t\t# get next frame from camera stream, track marker and display it\n\t\t\t\t\t\t\tp,area = self.getAndSearchImage()\n\t\t\t\t\t\telif miss_counter < 5:\n\t\t\t\t\t\t\tmiss_counter += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tself.status = \"Searching\"\n\t\t\t\t\t\tprint 'tracking'\n\t\t\t\t# if not in autonomous mode: deliver frames from camarea stream without marker tracking (colored)\n\t\t\t\telse:\n\t\t\t\t\tthread_lock_manual_mode.release()\n\t\t\t\t\ttry:\n\t\t\t\t\t\t# pull image\n\t\t\t\t\t\tpixelarray = drone.get_image()\n\t\t\t\t\t\tif pixelarray != None:\n\t\t\t\t\t\t\tframe = pixelarray[:,:,::-1].copy()\n\t\t\t\t\t\t#resize image\n\t\t\t\t\t\t\tresized=cv2.resize(frame,(W,H))\n\t\t\t\t\t\t\n\t\t\t\t\t\tthread_lock_camera_frame.acquire()\n\t\t\t\t\t\trender_frame = resized\n\t\t\t\t\t\tnew_frame = True\n\t\t\t\t\t\tthread_lock_camera_frame.release()\n\t\t\t\t\texcept:\n\t\t\t\t\t\tpass\n\t\tprint \"Exiting\" + self.name\n\t\t\n\t# function used by the automomous control thread retrieve an image from the camera stream and perform the tracking\n\tdef getAndSearchImage(self):\n\t\tglobal drone\n\t\tglobal thread_lock_camera_frame\n\t\tglobal render_frame\n\t\tglobal new_frame\n\t\tglobal W\n\t\tglobal H\n\t\ttry:\n\t\t\t# print pygame.image\n\t\t\tpixelarray = drone.get_image()\n\t\t\tif pixelarray != 
None:\n\t\t\t\tframe = pixelarray[:,:,::-1].copy()\n\t\t\t\t#resize image\n\t\t\t\tresized=cv2.resize(frame,(W,H))\n\t\t\t\t# aruco detection\n\t\t\t\tgray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)\n\t\t\t\taruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)\n\t\t\t\tparameters = aruco.DetectorParameters_create()\n\t\t\n\t\t\t\t#lists of ids and the corners beloning to each id\n\t\t\t\tcorners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)\n\t\t\t\t\n\t\t\t\t# iterate over all found markers to determine the one to follow (biggest one)\n\t\t\t\tselected_corners = []\n\t\t\t\tmax_index = (0,0)\n\t\t\t\tcounter = 0\n\t\t\t\tif len(corners) > 0:\n\t\t\t\t\tdim = corners[0].shape\n\t\t\t\t\t#print dim\n\t\t\t\t\twhile counter<dim[0]:\n\t\t\t\t\t\ttmp_corners = ((corners[0])[counter])\n\t\t\t\t\t\t# A=(1/2)|[(x3-x1)(y4-y2) +(x4-x2)(y1-y3)]|\n\t\t\t\t\t\tarea = 0.5*((tmp_corners[2][0] - tmp_corners[0][0]) * (tmp_corners[3][1] - tmp_corners[1][1]) + (tmp_corners[3][0] - tmp_corners[1][0]) * (tmp_corners[0][1] - tmp_corners[2][1]))\n\t\t\t\t\t\tif area > max_index[0]:\n\t\t\t\t\t\t\tmax_index = (area,counter)\n\t\t\t\t\t\tcounter +=1\n\n\t\t\t\t\tmax_corners = ((corners[0])[max_index[1]])\n\t\t\t\t\tselected_corners = np.array([np.array([(corners[0])[max_index[1]]],dtype=np.float32)])#[max_index[0]*4:max_index[0]*4+3]\n\t\t\t\t\t\n\t\t\t\t# draw all markers\n\t\t\t\tdisplay = aruco.drawDetectedMarkers(resized, corners)\n\n\t\t\t\tthread_lock_camera_frame.acquire()\n\t\t\t\trender_frame = display\n\t\t\t\tnew_frame = True\n\t\t\t\tthread_lock_camera_frame.release()\n\n\t\t\t\t# prepare function output\n\t\t\t\tif len(selected_corners) > 0:\n\t\t\t\t\tx,y = max_corners.sum(axis=0)/4\n\t\t\t\t\tarea = max_index[0]\n\t\t\t\telse:\n\t\t\t\t\tx = -W\n\t\t\t\t\ty = -1\n\t\t\t\t\tarea = -1\n\t\t\t\treturn (x-W/2,y-H/2), area\n\t\texcept:\n\t\t\tpass\n\t\t\n\t# function to perform a single control step in the autonomous control thread\n\t# TODO: vectorize calculation to avoid redundancy, determine I&D controller values(currently commented out)\n\tdef controlStep(self,p,area):\n\t\t\n\t\tglobal tx_prev\n\t\tglobal uix_prev\n\t\tglobal ex_prev\n\t\tglobal uif_prev\n\t\tglobal ef_prev\n\t\tglobal uiy_prev\n\t\tglobal ey_prev\n\t\tup_down = 0\n\t\tleft_right = 0\n\t\tfront_back = 0\n\t\tx = p[0]\t \n\t\ty = p[1]\n\t\t\n\t\tmove_command = False\n\n\t\tMAX_SPEED_ROT = 1.5\n\t\tMAX_SPEED_MOVE = 2.0\n\n\t\t# control direction\n\n\t\tK_px=1.0\n\t\tK_dx=1.0\n\t\tK_ix=1.0\t\n\t\tux_threshold = 15\t\t\n\t\t\t\n\t\t#control x\n\t\t#error for x between the desired and actual output\n\t\tex = 0 - x\n\t\tif tx_prev == 0:\n\t\t\ttx_prev = time.time() - 0.008\n\t\ttx = time.time() - tx_prev\n\t\t\n\t\t#Integration input\n\t\tuix = uix_prev + 1/K_ix * tx*ex\n\t\t#Derivation input\n\t\tudx = 1/K_dx * (ex-ex_prev)/tx\n\t\t\n\t\t#adjust previous values\n\t\tex_prev = ex\n\t\ttx_prev += tx\n\t\tuix_prev = uix\n\t\n\t\t#calculate input for the system\n\t\tux = K_px * (ex) #+ uix + udx)\n\t\t\n\t\tif ux < -ux_threshold or ux > ux_threshold:\n\t\t\tleft_right = MAX_SPEED_ROT * ux / W * 2\n\t\t\tprint 'left_right: '+str(MAX_SPEED_ROT * ux / W * 2)\n\t\t\tmove_command = True\n\t\t\n\t\t#control height\n\t\tK_py=0.5\n\t\tK_dy=1.0\n\t\tK_iy=1.0\t\n\t\tuy_threshold = 0.1\t\t\n\t\t\t\t\n\t\t#control y\t\t\n\t\t#error for y between the desired and actual output\n\t\tey = 0 - y\n\t\tty = tx\n\t\n\t\t#Integration input\n\t\tuiy = uiy_prev + 1/K_iy * ty*ey\n\t\t#Derivation input\n\t\tudy = 1/K_dy * 
(ey-ey_prev)/ty\n\t\t\n\t\t#adjust previous values\n\t\tey_prev = ey\n\t\tuiy_prev = uiy\n\t\t\n\t\t#calculate input for the system\n\t\tuy = 2.0/H*K_py * (ey) #+ uiy + udy)\n\t\tif uy < -uy_threshold or uy > uy_threshold:\n\t\t\tup_down = MAX_SPEED_MOVE * uy\n\t\t\tmove_command = True\n\t\t\tprint 'up_down: '+str(MAX_SPEED_MOVE * uy)\n\t\t\n\t\t# control forward\n\t\tK_pf=0.4\n\t\tK_df=1.0\n\t\tK_if=1.0\t\n\t\tuf_threshold = 0.005\n\t\t\t\n\t\t#control f\n\t\t#error for f between the desired and actual output\n\t\tef = 0.2 - (area/(W*H)) **0.5\n\t\ttf = tx\n\t\t#Integration input\n\t\tuif = uif_prev + 1/K_if * tf*ef\n\t\t#Derivation input\n\t\tudf = 1/K_df * (ef-ef_prev)/tf\n\t\t\n\t\t#adjust previous values\n\t\tef_prev = ef\n\t\tuif_prev = uif\n\t\n\t\t#calculate input for the system\n\t\tuf = K_pf * (ef) #+ uif + udf)\n\t\t\n\t\tif uf < -uf_threshold or uf > uf_threshold:\n\t\t\tfront_back = MAX_SPEED_MOVE * uf \n\t\t\tmove_command = True\t\t\n\t\t\tprint 'front_back: '+str(MAX_SPEED_MOVE * uf)\n\n\t\t# apply control vectors\n\t\tdrone.at(libardrone.at_pcmd, move_command, 0, -front_back, up_down, -left_right)\n\n\ndef main():\n\tglobal drone\n\tglobal thread_lock\n\tglobal thread_lock_manual_mode\n\tglobal thread_lock_camera_frame\n\tglobal thread_lock_key\n\tglobal exiting\n\tglobal manual_mode\n\tglobal W\n\tglobal H\n\tglobal render_frame\n\tglobal new_frame\n\tglobal key\n\tglobal tx_prev\n\tglobal uix_prev\n\tglobal ex_prev\n\tglobal uif_prev\n\tglobal ef_prev\n\tglobal uiy_prev\n\tglobal ey_prev\n\n\t#initialization\n\tW, H = 640, 360\n\tkey = -1\n\t\n\tdrone = libardrone.ARDrone(True)\n\tdrone.reset()\n\t\n\texiting = False\n\tmanual_mode = True\n\tnew_frame = False\n\tthreads = []\n\tthread_lock = threading.Lock()\n\tthread_lock_manual_mode = threading.Lock()\n\tthread_lock_camera_frame = threading.Lock()\n\tthread_lock_key = threading.Lock()\n\n\ttx_prev = 0\n\tuix_prev = 0\n\tex_prev = 0\n\tuif_prev = 0\n\tef_prev = 0\n\tuiy_prev = 0\n\tey_prev = 0\t\t\n\t\n\t# Create new threads\n\tmanual_control_thread = manualControlThread(1, \"Manual Control Thread\")\n\tautomatic_control_thread = automaticControlThread(2, \"Automatic Control Thread\")\n\t\n\t# Start new Threads\n\tmanual_control_thread.start()\n\tautomatic_control_thread.start()\n\t\n\t# Add threads to thread list\n\tthreads.append(manual_control_thread)\n\tthreads.append(automatic_control_thread)\n\n\tthread_lock.acquire()\n\twhile not exiting:\t\t\n\t\tthread_lock.release()\n\n\t\t# wait for pressed key\n\t\tthread_lock_key.acquire()\n\t\tkey = cv2.waitKey(33)\n\t\tthread_lock_key.release()\n\n\t\t# display new frame from camera\n\t\tthread_lock_camera_frame.acquire()\n\t\tif new_frame:\n\t\t\tcv2.imshow('Drone',render_frame)\t\n\t\tthread_lock_camera_frame.release()\n\t\tthread_lock.acquire()\n\n\t# Wait for all threads to complete\n\tfor t in threads:\n\t\tt.join()\n\tprint \"Exiting Main Program\"\n\nif __name__ == '__main__':\n\tmain()\n" }, { "alpha_fraction": 0.748042106628418, "alphanum_fraction": 0.7507426142692566, "avg_line_length": 54.25373077392578, "blob_id": "144e4f16955b61fb838fd4ecdbd72058193e9f0c", "content_id": "f35dce8854480786713c1d58b452b77ee3657896", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3705, "license_type": "permissive", "max_line_length": 514, "num_lines": 67, "path": "/Yolonese/README.md", "repo_name": "xjsxujingsong/CDTM-Deep-Learning-Drones", "src_encoding": "UTF-8", "text": "YOLO Autonomous Drone - Deep Learning 
Person Detection\n===================\n\nThe YOLO Drone localizes and follows people with the help of the YOLO Deep Network. Often, more than just one person might be in the picture of the drone’s camera so a standard deep learning people/body recognition cannot deliver sufficient results. This is why we chose the color of the shirt worn by the respective person to be a second criterion. Hence, we require the \"operator\" of the drone to wear a shirt with a distinct yellow color. This turns out to be a suitable solution to the aforementioned problem. \n\n## Requirements\nTo run this project Keras and Theano are needed for the deeplearning part. Furthermore, a working libardrone must be installed. For shirt detection opencv must be installed on the system.\n\n> **Requirements list (+ all dependencies!) (python2.7):**\n> - keras (http://www.keras.io)\n> - theano (http://deeplearning.net/software/theano/)\n> - libardrone (https://github.com/venthur/python-ardrone)\n> - opencv (http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_tutorials.html)\n\n## YOLO Network\nFor the YOLO network we tweaked the original implementation of https://github.com/sunshineatnoon/Darknet.keras. To run the Network with pretrained weights we suggest to use http://pjreddie.com/media/files/yolo-tiny.weights.\n\n## Run the project\nIf you have all requirements as defined above you can simple run the project by entering:\n```\n$ python drone.py\n```\nThis contains the main file of the drone. Please make sure that you have an active connection to the drone via wifi.\n\n## Switching between interfaces\nIf you want to switch between autonomous and manual flight you can simply change the main definition of drone.py by flipping the manual argument\n```\ndef main():\n drone = YOLODrone(manual=False)\n drone.start()\n```\n\n## Autonomous Interface\n\n![Detection 1](pictures/detection_1.png?raw=true \"Detection 1\") ![Detection 2](pictures/detection_2.png?raw=true \"Detection 2\")\n\nAs already described, the drone is looking for persons. The interface marks persons / groups of persons with red boxes. Additionally, a yellow t-shirt determines the real operator of the drone which is also highlighted in the interface. If more than one person wears a yellow shirt in the picture, the drone chooses the red box (person) that has the highest amount of yellow in them and continues to follow this particular person.\n\n## Manual Interface\nIf you don't press any key the drone will hover at its position. Use following keys to control the drone.\n\nKey | Function\n------- | ------- \nt | takeoff\n(space) | land\nw | move forward\ns | move backward\nd | move right\na | move left\n8 | move up\n2 | move down\ne | turn right\nq | turn left\nc | stop flight\n\n## Contributers\n - [Dominik Durner](https://github.com/durner)\n - [Christopher Helm](https://github.com/chrishelm)\n\n## Upstream Repository\nThe current master of this project can be found at https://github.com/durner/yolo-autonomous-drone\n\n## Files\n- drone.py : Main file of the project. Includes the manual interface, the glue code to the autonomous interface between YOLO Network and Actuators. All multithreading and OpenCV pre-processing is handled.\n- PID.py : simple PID controller interface to easily control the movements of the drone (incl. smoothing of the movements).\n- YOLO.py : Set up of the YOLO Deep network in python. 
The subfolder utils include further needed files for the YOLO net.\n- actuators.py : With the help of the localized operator the actuators calculate how the drone needs to move to center the operator and follow him. Uses PID controllers for calculating the movements.\n\n" }, { "alpha_fraction": 0.5642299652099609, "alphanum_fraction": 0.5869410634040833, "avg_line_length": 29, "blob_id": "4c759807a9a059c278fdf9b4ef0cf602b08b3f7b", "content_id": "de6a0cc906adbdd2ac51197420e113ec051d39e1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1409, "license_type": "permissive", "max_line_length": 112, "num_lines": 47, "path": "/Yolonese/actuators.py", "repo_name": "xjsxujingsong/CDTM-Deep-Learning-Drones", "src_encoding": "UTF-8", "text": "import time\nfrom libardrone import libardrone\n\nfrom PID import PID\n\nclass Actuator(object):\n def __init__(self, drone, picture_width, desired_move):\n self.turn = PID(K_p=0.6, K_d=0.1)\n self.move = PID(K_p=0.15, K_d=0.01)\n self.height = PID(K_p=0.2, K_d=0.00)\n self.picture_width = picture_width\n self.desired_move = desired_move\n self.drone = drone\n time.sleep(0.05)\n self.drone.takeoff()\n time.sleep(0.05)\n\n def step(self, wdithmid, width):\n desired_turn = self.picture_width / 2\n actual_turn = wdithmid\n actual_move = width\n\n ut = self.turn.step(desired_turn, actual_turn)\n\n um = self.move.step(self.desired_move, actual_move)\n\n height = 550\n nav_data = self.drone.get_navdata()\n nav_data = nav_data[0]\n uh = self.height.step(height, nav_data['altitude'])\n\n self.drone.at(libardrone.at_pcmd, True, 0, self.moveDrone(um), self.heightDrone(uh), self.turnDrone(ut))\n\n def turnDrone(self, u):\n speed = - u / (self.picture_width / 2.)\n print \"move horizontal to\" + str(speed)\n return speed\n\n def moveDrone(self, u):\n speed = - u / (self.picture_width / 2.)\n print \"move near to\" + str(speed)\n return speed\n\n def heightDrone(self, u):\n speed = u / 500\n print \"height near to\" + str(speed)\n return speed" }, { "alpha_fraction": 0.5397764444351196, "alphanum_fraction": 0.5613083243370056, "avg_line_length": 32.43406677246094, "blob_id": "c0784b4dff8b92549155eb594272a97c5b9465a3", "content_id": "9e57df7cd3e66a005099e167c9e486347aa47232", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6084, "license_type": "permissive", "max_line_length": 136, "num_lines": 182, "path": "/Yolonese/YOLO.py", "repo_name": "xjsxujingsong/CDTM-Deep-Learning-Drones", "src_encoding": "UTF-8", "text": "import os\nimport numpy as np\n\nfrom keras.models import Sequential\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, AveragePooling2D\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers.core import Flatten, Dense, Activation, Reshape\n\nfrom math import pow\nimport theano\n\nfrom PIL import Image\nfrom PIL import ImageDraw\n\n\nclass box:\n def __init__(self,classes):\n self.x = 0\n self.y = 0\n self.h = 0\n self.w = 0\n self.class_num = 0\n self.probs = np.zeros((classes,1))\n\ndef SimpleNet(yoloNet):\n model = Sequential()\n\n #Convolution Layer 2 & Max Pooling Layer 3\n model.add(ZeroPadding2D(padding=(1,1),input_shape=(3,448,448)))\n model.add(Convolution2D(16, 3, 3, weights=[yoloNet.layers[1].weights,yoloNet.layers[1].biases],border_mode='valid',subsample=(1,1)))\n model.add(LeakyReLU(alpha=0.1))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n #Use a for loop to replace 
all manually defined layers\n for i in range(3,yoloNet.layer_number):\n l = yoloNet.layers[i]\n if(l.type == \"CONVOLUTIONAL\"):\n model.add(ZeroPadding2D(padding=(l.size//2,l.size//2,)))\n model.add(Convolution2D(l.n, l.size, l.size, weights=[l.weights,l.biases],border_mode='valid',subsample=(1,1)))\n model.add(LeakyReLU(alpha=0.1))\n elif(l.type == \"MAXPOOL\"):\n model.add(MaxPooling2D(pool_size=(2, 2),border_mode='valid'))\n elif(l.type == \"FLATTEN\"):\n model.add(Flatten())\n elif(l.type == \"CONNECTED\"):\n model.add(Dense(l.output_size, weights=[l.weights,l.biases]))\n elif(l.type == \"LEAKY\"):\n model.add(LeakyReLU(alpha=0.1))\n elif(l.type == \"DROPOUT\"):\n pass\n else:\n print \"Error: Unknown Layer Type\",l.type\n return model\n\ndef get_activations(model, layer, X_batch):\n get_activations = theano.function([model.layers[0].input], model.layers[layer].get_output(train=False), allow_input_downcast=True)\n activations = get_activations(X_batch) # same result as above\n return activations\n\ndef convert_yolo_detections(predictions,classes=20,num=2,square=True,side=7,w=1,h=1,threshold=0.2,only_objectness=0):\n boxes = []\n probs = np.zeros((side*side*num,classes))\n for i in range(side*side):\n row = i / side\n col = i % side\n for n in range(num):\n index = i*num+n\n p_index = side*side*classes+i*num+n\n scale = predictions[p_index]\n box_index = side*side*(classes+num) + (i*num+n)*4\n\n new_box = box(classes)\n new_box.x = (predictions[box_index + 0] + col) / side * w\n new_box.y = (predictions[box_index + 1] + row) / side * h\n new_box.h = pow(predictions[box_index + 2], 2) * w\n new_box.w = pow(predictions[box_index + 3], 2) * h\n\n for j in range(classes):\n class_index = i*classes\n prob = scale*predictions[class_index+j]\n if(prob > threshold):\n new_box.probs[j] = prob\n else:\n new_box.probs[j] = 0\n if(only_objectness):\n new_box.probs[0] = scale\n\n boxes.append(new_box)\n return boxes\n\ndef prob_compare(boxa,boxb):\n if(boxa.probs[boxa.class_num] < boxb.probs[boxb.class_num]):\n return 1\n elif(boxa.probs[boxa.class_num] == boxb.probs[boxb.class_num]):\n return 0\n else:\n return -1\n\ndef do_nms_sort(boxes,total,classes=20,thresh=0.5):\n for k in range(classes):\n for box in boxes:\n box.class_num = k\n sorted_boxes = sorted(boxes,cmp=prob_compare)\n for i in range(total):\n if(sorted_boxes[i].probs[k] == 0):\n continue\n boxa = sorted_boxes[i]\n for j in range(i+1,total):\n boxb = sorted_boxes[j]\n if(boxb.probs[k] != 0 and box_iou(boxa,boxb) > thresh):\n boxb.probs[k] = 0\n sorted_boxes[j] = boxb\n return sorted_boxes\n\ndef overlap(x1,w1,x2,w2):\n l1 = x1 - w1/2;\n l2 = x2 - w2/2;\n if(l1 > l2):\n left = l1\n else:\n left = l2\n r1 = x1 + w1/2;\n r2 = x2 + w2/2;\n if(r1 < r2):\n right = r1\n else:\n right = r2\n return right - left;\n\ndef box_intersection(a, b):\n w = overlap(a.x, a.w, b.x, b.w);\n h = overlap(a.y, a.h, b.y, b.h);\n if(w < 0 or h < 0):\n return 0;\n area = w*h;\n return area;\n\ndef box_union(a, b):\n i = box_intersection(a, b);\n u = a.w*a.h + b.w*b.h - i;\n return u;\n\ndef box_iou(a, b):\n return box_intersection(a, b)/box_union(a, b);\n\ndef draw_detections(impath,num,thresh,boxes,classes,labels,save_name):\n \"\"\"\n Args:\n impath: The image path\n num: total number of bounding boxes\n thresh: boxes prob beyond this thresh will be drawn\n boxes: boxes predicted by the network\n classes: class numbers of the objects\n \"\"\"\n img = Image.open(impath)\n drawable = ImageDraw.Draw(img)\n ImageSize = img.size\n for i in range(num):\n #for 
each box, find the class with maximum prob\n max_class = np.argmax(boxes[i].probs)\n prob = boxes[i].probs[max_class]\n if(prob > thresh and labels[max_class] == \"person\"):\n b = boxes[i]\n\n temp = b.w\n b.w = b.h\n b.h = temp\n\n left = (b.x-b.w/2.)*ImageSize[0];\n right = (b.x+b.w/2.)*ImageSize[0];\n top = (b.y-b.h/2.)*ImageSize[1];\n bot = (b.y+b.h/2.)*ImageSize[1];\n\n if(left < 0): left = 0;\n if(right > ImageSize[0]-1): right = ImageSize[0]-1;\n if(top < 0): top = 0;\n if(bot > ImageSize[1]-1): bot = ImageSize[1]-1;\n\n # print \"The four cords are: \",left,right,top,bot\n drawable.rectangle([left,top,right,bot],outline=\"red\")\n img.save(\"results/\" + save_name)\n # print labels[max_class],\": \",boxes[i].probs[max_class]" }, { "alpha_fraction": 0.5402876734733582, "alphanum_fraction": 0.5638537406921387, "avg_line_length": 38.45774459838867, "blob_id": "a6bb865401fb99c2bff2c63fe20c18a4aef69020", "content_id": "0c32a9f67fc760b911c567c21b634cda9d6ea9e1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5771, "license_type": "permissive", "max_line_length": 147, "num_lines": 142, "path": "/C-HAWK/CentralControl.py", "repo_name": "xjsxujingsong/CDTM-Deep-Learning-Drones", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Aug 10 11:48:48 2016\r\n\r\n@author: Christian\r\n\"\"\"\r\nfrom PIDController import PID_Controller\r\nfrom libardrone import libardrone\r\nimport patternRecognition \r\nimport time\r\nimport cv2\r\nimport numpy as np\r\n\r\nclass CentralControl(object):\r\n # Controller parameter\r\n x_PIDController=PID_Controller(1,0.0,2.5,\"xController\")\r\n y_PIDController=PID_Controller(1.5,0.0,2.25,\"yController\")\r\n bf_PIDController=PID_Controller(1,0.0,3,\"bfController\")\r\n speedRange = [0.45,0.15,0.375,0.1] #turn x, move y, go forward, move x ## Stable values [0.25,0.15,0.2,0.05]\r\n maxPIDValue = [200,200,200,200]\r\n x_offset=0.003\r\n \r\n \r\n def __init__(self,standardXCoord,standardYCoord,standardSize):\r\n self.standardXCoord=standardXCoord\r\n self.standardYCoord=standardYCoord\r\n self.standardSize=standardSize\r\n \r\n \r\n #Image Recognition returns left upper corner coordinates and right downer corner coordinates\r\n \r\n def computeSize(self,xCoordinate_leftUpperCorner,yCoordinate_leftUpperCorner,xCoordinate_rightDownerCorner,yCoordinate_rightDownerCorner):\r\n return ((xCoordinate_leftUpperCorner-xCoordinate_rightDownerCorner)**2+(yCoordinate_leftUpperCorner-yCoordinate_rightDownerCorner)**2)**0.5\r\n \r\n def reciprocalSize(self,desiredValue,actualValue):\r\n if actualValue<desiredValue:\r\n return 2*desiredValue-(desiredValue**2)/actualValue\r\n else:\r\n return actualValue\r\n \r\n def controlLoop(self):\r\n drone=libardrone.ARDrone(True) \r\n drone.reset()\r\n\r\n frame=drone.get_image()\r\n cv2.imshow('img',frame)\r\n #Wait for any key to start over\r\n cv2.waitKey(0)\r\n \r\n drone.takeoff()\r\n print \"Takeoff\"\r\n \r\n logFilePIDPath=\"logFilePID_11.csv\"\r\n logFilePID=open(logFilePIDPath,\"a\")\r\n logFileCmdPath=\"logFileCmd_11.csv\"\r\n logFileCmd=open(logFileCmdPath,\"a\")\r\n\r\n logFilePID.write(\"\\n\\n=================================================================================\\n\")\r\n logFileCmd.write(\"\\n\\n=================================================================================\\n\")\r\n \r\n running=True\r\n\r\n while running:\r\n key=cv2.waitKey(5)\r\n if key==32:\r\n print \"Land drone\"\r\n running=False\r\n 
drone.land()\r\n \r\n frame=drone.get_image()\r\n \r\n # call imageRec\r\n xlu,ylu,xrd,yrd=patternRecognition.cornerPointsChess(frame,logFileCmd)\r\n if not(xlu==-1 and ylu==-1 and xrd==-1 and yrd==-1): \r\n # computeSize\r\n currentsize=self.computeSize(xlu,ylu,xrd,yrd)\r\n recipSize = self.reciprocalSize(self.standardSize,currentsize)\r\n xAvg = (xlu+xrd)/2.0\r\n yAvg = (ylu+yrd)/2.0\r\n # call PIDController\r\n x_PIDValue=self.x_PIDController.pidControl(self.standardXCoord,xAvg)\r\n y_PIDValue=self.y_PIDController.pidControl(self.standardYCoord,yAvg)\r\n bf_PIDValue=self.bf_PIDController.pidControl(self.standardSize,recipSize)\r\n #log-file entries\r\n self.logFileWrite(logFileCmd,\"x_PID: \"+str(x_PIDValue))\r\n self.logFileWrite(logFileCmd,\"y_PID: \"+str(y_PIDValue))\r\n self.logFileWrite(logFileCmd,\"bf_PID: \"+str(bf_PIDValue)) \r\n self.logFileWrite(logFilePID,str(x_PIDValue)+\",\"+str(y_PIDValue)+\",\"+str(bf_PIDValue))\r\n # Actuate \r\n xSpeed,ySpeed,bfSpeed,x2Speed = self.calcSpeed(x_PIDValue,y_PIDValue,bf_PIDValue)\r\n self.actuateAll(x2Speed,xSpeed,ySpeed,bfSpeed,drone)\r\n else:\r\n drone.hover()\r\n pass\r\n \r\n time.sleep(0.01)\r\n \r\n #Close log-files\r\n logFilePID.close \r\n logFileCmd.close \r\n print \"Drone landed\" \r\n \r\n print(\"Shutting down...\")\r\n drone.halt()\r\n print(\"Ok.\")\r\n \r\n\r\n def calcSpeed(self,x_PIDValue,y_PIDValue,bf_PIDValue):\r\n # x-speed: change sign PIDValue>0 <=> speed<0\r\n if abs(x_PIDValue)>self.maxPIDValue[0]: \r\n xSpeed=-np.sign(x_PIDValue)*self.speedRange[0]\r\n else:\r\n xSpeed=-x_PIDValue/self.maxPIDValue[0]*self.speedRange[0]\r\n # y-speed: keep sign PIDValue>0 <=> speed>0\r\n if abs(y_PIDValue)>self.maxPIDValue[1]: \r\n ySpeed=np.sign(y_PIDValue)*self.speedRange[1]\r\n else:\r\n ySpeed=y_PIDValue/self.maxPIDValue[1]*self.speedRange[1]\r\n # bf-speed: change sign PIDValue>0 <=> speed<0\r\n if abs(bf_PIDValue)>self.maxPIDValue[2]: \r\n bfSpeed=-np.sign(bf_PIDValue)*self.speedRange[2]\r\n else:\r\n bfSpeed=-bf_PIDValue/self.maxPIDValue[2]*self.speedRange[2]\r\n # x-speed: change sign PIDValue>0 <=> speed<0\r\n if abs(x_PIDValue)>self.maxPIDValue[3]: \r\n x2Speed=-np.sign(x_PIDValue)*self.speedRange[3]\r\n else:\r\n x2Speed=-x_PIDValue/self.maxPIDValue[3]*self.speedRange[3]\r\n return xSpeed,ySpeed,bfSpeed,x2Speed\r\n \r\n \r\n def actuateAll(self,x2Speed,xSpeed,ySpeed,bfSpeed,drone):\r\n drone.at(libardrone.at_pcmd, True, x2Speed+self.x_offset, bfSpeed, ySpeed, xSpeed)\r\n \r\n def logFileWrite(self,file,msg):\r\n pass\r\n# file.write(\"%s,%s\\n\" % (str(time.time()), msg))\r\n \r\n\r\n\r\ncontrol=CentralControl(320,180,80) #Parameter of ideal position (x,y and z direction [px])\r\ncontrol.controlLoop()\r\n \r\n \r\n \r\n" }, { "alpha_fraction": 0.7727649211883545, "alphanum_fraction": 0.7798013091087341, "avg_line_length": 64.27027130126953, "blob_id": "a1f18284354e908c2dc9951516012537f2184d93", "content_id": "feb4cda560c5910a296b562b1ea733050fae4755", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2416, "license_type": "permissive", "max_line_length": 382, "num_lines": 37, "path": "/EAGLE-EYE/README.md", "repo_name": "xjsxujingsong/CDTM-Deep-Learning-Drones", "src_encoding": "UTF-8", "text": "# EAGLE-EYE\n\nEAGLE-EYE was developed during the [CDTM Deep Learning Drones Elective](https://github.com/PatrickChrist/CDTM-Deep-Learning-Drones) and allows to control the AR.Drone 2. 
The drone can be remote controlled using the camera stream and the keyboard of the PC or switched into autonomous flight mode where it follows a 6x6 [ArUco](http://www.uco.es/investiga/grupos/ava/node/26) marker.\n\nThis project combines the features of a low level control library [libardrone](https://github.com/mjdev/python-ardrone) with the marker tracking functionalities of [OpenCV 3.1](http://opencv.org/opencv-3-1.html) and a software [PID controller](https://en.wikipedia.org/wiki/PID_controller) to achieve autonomous behavior.\nThis drone control mainly uses the libraries OpenCV and [libardrone](https://github.com/venthur/python-ardrone).\nOnce in autonomous mode, the drone follows the biggest visible ArUco marker. The controller determines it's steering commands the following way:\n* Area of the marker: forward/backwards flight\n* x-Axis offset to the frame's center: turn left/right\n* y-Axis offset to the frame's center: move up/down\nSuggested improvements:\n* add left/right movement without turning\n* optimize the controller values of the PID controller\n\n## Setup and Run EAGLE-EYE:\nThe code was developed for the AR.Drone 2 using Python 2.7 and OpenCV 3.1.0.\nThe GitHub project [libardrone](https://github.com/mjdev/python-ardrone) provides a set of low level control functions that are used to control the drone. This library must be installed before being able to execute EAGLE-EYE.\nIf the libardrone library is contained in your python environment, the EAGLE-EYE repository can be cloned into a new directory and started by executing eagle_eye.py.\nMake sure that you are connected to the drone's WiFi!\n\nPress the return key to take off and control the drone using the following keys:\n* UP - fly higher\n* DOWN -fly lower\n* LEFT - turn left\n* RIGHT - turn right\n* W - move forward\n* S - move backwards\n* A - move to the left\n* D - move to the right\n* M - toggle between manual and autonomous control\n* SPACE - landing and shutdown\n\nNote: Depending on your setup you might have to adjust the keycodes for the arrow keys since those might be different to the ones used on our machines.\n\nWhile in autonomous mode, the drone will track and follow the biggest visible 6x6 ArUco marker.\n\n**_Authors:_** Hagen Schmidtchen and Isabel Poppek\n\n" }, { "alpha_fraction": 0.5995671153068542, "alphanum_fraction": 0.6428571343421936, "avg_line_length": 27.875, "blob_id": "bc60f3ac150fe463c8d9981c508eb5c34848ae9f", "content_id": "e8bf95957cc62062b001db5216c16b877ca6cd72", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1386, "license_type": "permissive", "max_line_length": 81, "num_lines": 48, "path": "/Yolonese/utils/crop.py", "repo_name": "xjsxujingsong/CDTM-Deep-Learning-Drones", "src_encoding": "UTF-8", "text": "from PIL import Image\nimport numpy as np\nfrom scipy import misc\nimport os\n\ndef crop(imPath,resize_width=256,resize_height=256,new_width=224,new_height=224):\n im = Image.open(imPath)\n im = im.resize((resize_width,resize_height),Image.ANTIALIAS)\n\n #central crop 224,224\n width, height = im.size # Get dimensions\n\n left = (width - new_width)/2\n top = (height - new_height)/2\n right = (width + new_width)/2\n bottom = (height + new_height)/2\n\n im = im.crop((left, top, right, bottom))\n\n im.save(\"frames/cropped.jpg\")\n\n image_array = np.array(im)\n image_array = np.rollaxis(image_array,2,0)\n image_array = image_array/255.0\n image_array = image_array * 2.0 - 1.0\n return image_array\n\ndef 
crop_detection(imPath,new_width=448,new_height=448,save=False,test=False):\n im = Image.open(imPath)\n im = im.resize((new_width,new_height),Image.ANTIALIAS)\n\n image_array = np.array(im)\n image_array = np.rollaxis(image_array,2,0)\n image_array = image_array/255.0\n image_array = image_array * 2.0 - 1.0\n\n if(test):\n image_array = (image_array + 1.0) / 2.0 * 225.0\n image_array = np.rollaxis(image_array,2,0)\n image_array = np.rollaxis(image_array,2,0)\n print image_array.shape\n\n misc.imsave('recovered.jpg', image_array)\n\n if(save):\n return image_array,im\n else:\n return image_array\n" }, { "alpha_fraction": 0.5573068261146545, "alphanum_fraction": 0.5794072151184082, "avg_line_length": 34.37575912475586, "blob_id": "9755f6a8b638572a3eb37d0383820e1ed17950af", "content_id": "271fddea72f1d1bcf0f6e2a12c3c17d0ef0755e4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5837, "license_type": "permissive", "max_line_length": 208, "num_lines": 165, "path": "/Yolonese/utils/ReadPascalforLoc.py", "repo_name": "xjsxujingsong/CDTM-Deep-Learning-Drones", "src_encoding": "UTF-8", "text": "import os\nimport xml.etree.ElementTree as ET\nfrom crop import crop_detection\nimport numpy as np\nfrom PIL import Image\nfrom PIL import ImageDraw\nimport scipy\nimport random\n\n\nvocPath = os.path.abspath(os.path.join(os.getcwd(),os.path.pardir,'dataset'))\n\nclass box:\n def __init__(self,x,y,h,w):\n self.x = x\n self.y = y\n self.h = h\n self.w = w\n\ndef overlap(x1,w1,x2,w2):\n l1 = x1 - w1/2;\n l2 = x2 - w2/2;\n if(l1 > l2):\n left = l1\n else:\n left = l2\n r1 = x1 + w1/2;\n r2 = x2 + w2/2;\n if(r1 < r2):\n right = r1\n else:\n right = r2\n return right - left;\n\ndef box_intersection(a, b):\n w = overlap(a.x, a.w, b.x, b.w);\n h = overlap(a.y, a.h, b.y, b.h);\n if(w < 0 or h < 0):\n return 0;\n area = w*h;\n return area;\n\ndef box_union(a, b):\n i = box_intersection(a, b);\n u = a.w*a.h + b.w*b.h - i;\n return u;\n\ndef box_iou(a, b):\n return box_intersection(a, b)/box_union(a, b);\n\nclass image():\n \"\"\"\n Args:\n side: An image is divided into side*side grids\n Each image class has two variables:\n imgPath: the path of an image on my computer\n bboxes: a side*side matrix, each element in the matrix is cell\n \"\"\"\n def __init__(self,side,imgPath):\n self.imgPath = imgPath\n self.boxes = np.zeros((side,side))\n\n def parseXML(self,xmlPath,labels,side):\n \"\"\"\n Args:\n xmlPath: The path of the xml file of this image\n labels: label names of pascal voc dataset\n side: an image is divided into side*side grid\n \"\"\"\n tree = ET.parse(xmlPath)\n root = tree.getroot()\n\n width = int(root.find('size').find('width').text)\n height = int(root.find('size').find('height').text)\n\n for obj in root.iter('object'):\n class_num = labels.index(obj.find('name').text)\n bndbox = obj.find('bndbox')\n left = int(bndbox.find('xmin').text)\n top = int(bndbox.find('ymin').text)\n right = int(bndbox.find('xmax').text)\n down = int(bndbox.find('ymax').text)\n\n #trans the coords to 448*448\n left = left*1.0 / width * 448\n right = right*1.0 / width * 448\n top = top*1.0 / height * 448\n down = down*1.0 / height * 448\n\n boxa = box((left+right)/2,(top+down)/2,down-top,right-left)\n for i in range(int(left/64),int(right/64)+1):\n for j in range(int(top/64),int(down/64)+1):\n box_left = i*64\n box_right = (i+1)*64\n box_top = j*64\n box_down = (j+1)*64\n boxb = box((box_left+box_right)/2,(box_top+box_down)/2,64,64)\n iou = 
box_intersection(boxa,boxb)\n if(iou/(64*64) > 0.25):\n self.boxes[j][i] = 1\n\ndef prepareBatch(start,end,imageNameFile,vocPath):\n \"\"\"\n Args:\n start: the number of image to start\n end: the number of image to end\n imageNameFile: the path of the file that contains image names\n vocPath: the path of pascal voc dataset\n Funs:\n generate a batch of images from start~end\n Returns:\n A list of end-start+1 image objects\n \"\"\"\n imageList = []\n labels = [\"aeroplane\", \"bicycle\", \"bird\", \"boat\", \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\", \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\", \"sofa\", \"train\",\"tvmonitor\"]\n file = open(imageNameFile)\n imageNames = file.readlines()\n for i in range(start,end):\n imgName = imageNames[i].strip('\\n')\n imgPath = os.path.join(vocPath,'JPEGImages',imgName)+'.jpg'\n xmlPath = os.path.join(vocPath,'Annotations',imgName)+'.xml'\n img = image(side=7,imgPath=imgPath)\n img.parseXML(xmlPath,labels,7)\n imageList.append(img)\n\n return imageList\n\n#Prepare training data\ndef generate_batch_data(vocPath,imageNameFile,batch_size,sample_number):\n \"\"\"\n Args:\n vocPath: the path of pascal voc data\n imageNameFile: the path of the file of image names\n batchsize: batch size, sample_number should be divided by batchsize\n Funcs:\n A data generator generates training batch indefinitely\n \"\"\"\n class_num = 20\n #Read all the data once and dispatch them out as batches to save time\n TotalimageList = prepareBatch(0,sample_number,imageNameFile,vocPath)\n\n while 1:\n batches = sample_number // batch_size\n for i in range(batches):\n images = []\n boxes = []\n sample_index = np.random.choice(sample_number,batch_size,replace=True)\n #sample_index = [3]\n for ind in sample_index:\n image = TotalimageList[ind]\n #print image.imgPath\n image_array = crop_detection(image.imgPath,new_width=448,new_height=448)\n #image_array = np.expand_dims(image_array,axis=0)\n\n images.append(image_array)\n boxes.append(np.reshape(image.boxes,-1))\n #return np.asarray(images),np.asarray(boxes)\n yield np.asarray(images),np.asarray(boxes)\n\nif __name__ == '__main__':\n img,boxes = generate_batch_data('/home/media/Documents/YOLO.keras/dataset/train_val/','/home/media/Documents/YOLO.keras/utils/image_name',1,1)\n #labels = [\"aeroplane\", \"bicycle\", \"bird\", \"boat\", \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\", \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\", \"sofa\", \"train\",\"tvmonitor\"]\n #img = image(side=7,imgPath='/home/media/Documents/YOLO.keras/dataset/train_val/JPEGImages/000011.jpg')\n #img.parseXML(xmlPath='/home/media/Documents/YOLO.keras/dataset/VOCdevkit/VOC2007/Annotations/000011.xml',labels=labels,side=7)\n print boxes\n" }, { "alpha_fraction": 0.7602739930152893, "alphanum_fraction": 0.767123281955719, "avg_line_length": 57.400001525878906, "blob_id": "ed01f11159f8f2e469365c1dfc9d3b57bf95f3d8", "content_id": "fa7ce96362d2ad554af0505047baa71e3754dec8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 876, "license_type": "permissive", "max_line_length": 202, "num_lines": 15, "path": "/Teams/Arucinator/README.md", "repo_name": "xjsxujingsong/CDTM-Deep-Learning-Drones", "src_encoding": "UTF-8", "text": "# Drone Elective at the _Center for Digital Technology and Management_ (CDTM)\n## Contributors\n* [Daisy Paiva](mailto:[email protected])\n* [Martin 
Patz](mailto:[email protected])\n \n## Approach\nOur main approach was to use [Aruco markers](http://docs.opencv.org/3.1.0/d5/dae/tutorial_aruco_detection.html) and a [PID](https://en.wikipedia.org/wiki/PID_controller) controller to control the Drone.\nFor that we used the [libardrone](https://github.com/venthur/python-ardrone/blob/master/libardrone.py) library.\nThe Aruco markers are detected with the _opencv_ library.\n\n## Outcome\nWe delivered a quite good race.\nEventually we finished in place 5 out of 9.\nOur main issue were connection problems between the Drone and the Notebook, running the python script.\nThis was especially severe in the _TUM Maschinenbau_ building, because many people, many phones, and many Drones (each hosting its own WiFi hotspot).\n" }, { "alpha_fraction": 0.6632934808731079, "alphanum_fraction": 0.7203311920166016, "avg_line_length": 28.351350784301758, "blob_id": "a27219e3836677b8cb01470a30c3250f7c9ac3e0", "content_id": "6df02f34894bed3de14af6be7e5ca984978920f2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2174, "license_type": "permissive", "max_line_length": 134, "num_lines": 74, "path": "/README.md", "repo_name": "xjsxujingsong/CDTM-Deep-Learning-Drones", "src_encoding": "UTF-8", "text": "# CDTM-Deep-Learning-Drones\nCourse Material for CDTM Deep Learning Drones Course\n## Citation\nIf you find this useful for your research please cite our paper:\n```\n\n@inbook{Christ2016,\n\tAuthor = {Christ, Patrick Ferdinand and Lachner, Florian and H{\\\"o}sl, Axel and Menze, Bjoern and Diepold, Klaus and Butz, Andreas},\n\tBooktitle = {Computer Vision -- ECCV 2016 Workshops: Amsterdam, The Netherlands, October 8-10 and 15-16, 2016, Proceedings, Part II},\n\tDoi = {10.1007/978-3-319-48881-3_17},\n\tEditor = {Hua, Gang and J{\\'e}gou, Herv{\\'e}},\n\tIsbn = {978-3-319-48881-3},\n\tPages = {238--253},\n\tPublisher = {Springer International Publishing},\n\tTitle = {Human-Drone-Interaction: A Case Study to Investigate the Relation Between Autonomy and User Experience},\n\tUrl = {http://dx.doi.org/10.1007/978-3-319-48881-3_17},\n\tYear = {2016},\n\tBdsk-Url-1 = {http://dx.doi.org/10.1007/978-3-319-48881-3_17}}\n\n```\n## Usage Docker Container\n### Build from scratch using\n```\nnvidia-docker build -t YOURNICENAMEFORTHECONTAINER .\n```\n### Pull from Dockerhub.com\n```\nnvidia-docker pull patrickchrist/cdtm-deep-learning-drones\n```\n(or CPU-only:)\n```\ndocker pull mjdev/cdtm-deep-learning-drones\n```\n### Start Docker Container Using\nWith Nvidia GPU:\n```\nsudo nvidia-docker run -v /home/$(whoami):/data -p 5000:5000 patrickchrist/cdtm-deep-learning-drones\n```\nor CPU-only:\n```\ndocker run -v /home/$(whoami):/data -p 5000:5000 mjdev/cdtm-deep-learning-drones\n```\n\n### Enter a running Docker Container\nGet the docker container id and remember it.\n```\nsudo nvidia-docker ps\n```\nLogin using the following command.\n```\nsudo docker exec -it CONTAINERID bash\n```\n## Python ARDrone Lib\nTo install the Python ARDrone Lib for Windows follow please the README in /python-ardrone.\n\n## Python ARDone Lib PS-Drone\nSecond Lib to control the drone.\n## Object Detection Tutorial Using Digits DetectNET\n### Download Data from Kitti\n```\nLabels\nhttp://kitti.is.tue.mpg.de/kitti/data_object_label_2.zip\n```\n```\nData\nhttp://kitti.is.tue.mpg.de/kitti/data_object_image_2.zip\n```\n```\nInfos\nhttp://kitti.is.tue.mpg.de/kitti/devkit_object.zip\n```\n### Follow Digits Tutorial\nFollow the Digits Object 
Detection Tutorial.\n[https://github.com/NVIDIA/DIGITS/tree/master/examples/object-detection]\n\n\n" }, { "alpha_fraction": 0.5070009827613831, "alphanum_fraction": 0.526864230632782, "avg_line_length": 35.12941360473633, "blob_id": "e58aba680a8074f410d86d63dd29cb0b929b9195", "content_id": "61db6146f5129202ca6d302f12d1158ea869736d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9213, "license_type": "permissive", "max_line_length": 207, "num_lines": 255, "path": "/Yolonese/utils/ReadPascalVoc2.py", "repo_name": "xjsxujingsong/CDTM-Deep-Learning-Drones", "src_encoding": "UTF-8", "text": "import os\nimport xml.etree.ElementTree as ET\nfrom crop import crop_detection\nimport numpy as np\nfrom PIL import Image\nfrom PIL import ImageDraw\nimport scipy\nimport random\n\n\nvocPath = os.path.abspath(os.path.join(os.getcwd(),os.path.pardir,'dataset'))\n\nclass objInfo():\n \"\"\"\n objInfo saves the information of an object, including its class num, its cords\n \"\"\"\n def __init__(self,x,y,h,w,class_num):\n self.x = x\n self.y = y\n self.h = h\n self.w = w\n self.class_num = class_num\n\nclass Cell():\n \"\"\"\n A cell is a grid cell of an image, it has a boolean variable indicating whether there are any objects in this cell,\n and a list of objInfo objects indicating the information of objects if there are any\n \"\"\"\n def __init__(self):\n self.has_obj = False\n self.objs = []\n\nclass image():\n \"\"\"\n Args:\n side: An image is divided into side*side grids\n Each image class has two variables:\n imgPath: the path of an image on my computer\n bboxes: a side*side matrix, each element in the matrix is cell\n \"\"\"\n def __init__(self,side,imgPath):\n self.imgPath = imgPath\n self.boxes = []\n for i in range(side):\n rows = []\n for j in range(side):\n rows.append(Cell())\n self.boxes.append(rows)\n\n def parseXML(self,xmlPath,labels,side):\n \"\"\"\n Args:\n xmlPath: The path of the xml file of this image\n labels: label names of pascal voc dataset\n side: an image is divided into side*side grid\n \"\"\"\n tree = ET.parse(xmlPath)\n root = tree.getroot()\n\n width = int(root.find('size').find('width').text)\n height = int(root.find('size').find('height').text)\n\n for obj in root.iter('object'):\n class_num = labels.index(obj.find('name').text)\n bndbox = obj.find('bndbox')\n xmin = int(bndbox.find('xmin').text)\n ymin = int(bndbox.find('ymin').text)\n xmax = int(bndbox.find('xmax').text)\n ymax = int(bndbox.find('ymax').text)\n h = ymax-ymin\n w = xmax-xmin\n #objif = objInfo(xmin/448.0,ymin/448.0,np.sqrt(ymax-ymin)/448.0,np.sqrt(xmax-xmin)/448.0,class_num)\n\n #which cell this obj falls into\n centerx = (xmax+xmin)/2.0\n centery = (ymax+ymin)/2.0\n newx = (448.0/width)*centerx\n newy = (448.0/height)*centery\n\n h_new = h * (448.0 / height)\n w_new = w * (448.0 / width)\n\n cell_size = 448.0/side\n col = int(newx / cell_size)\n row = int(newy / cell_size)\n # print \"row,col:\",row,col,centerx,centery\n\n cell_left = col * cell_size\n cell_top = row * cell_size\n cord_x = (newx - cell_left) / cell_size\n cord_y = (newy - cell_top)/ cell_size\n\n objif = objInfo(cord_x,cord_y,np.sqrt(h_new/448.0),np.sqrt(w_new/448.0),class_num)\n self.boxes[row][col].has_obj = True\n self.boxes[row][col].objs.append(objif)\n\ndef prepareBatch(start,end,imageNameFile,vocPath):\n \"\"\"\n Args:\n start: the number of image to start\n end: the number of image to end\n imageNameFile: the path of the file that contains image names\n vocPath: the 
path of pascal voc dataset\n Funs:\n generate a batch of images from start~end\n Returns:\n A list of end-start+1 image objects\n \"\"\"\n imageList = []\n labels = [\"aeroplane\", \"bicycle\", \"bird\", \"boat\", \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\", \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\", \"sofa\", \"train\",\"tvmonitor\"]\n file = open(imageNameFile)\n imageNames = file.readlines()\n for i in range(start,end):\n imgName = imageNames[i].strip('\\n')\n imgPath = os.path.join(vocPath,'JPEGImages',imgName)+'.jpg'\n xmlPath = os.path.join(vocPath,'Annotations',imgName)+'.xml'\n img = image(side=7,imgPath=imgPath)\n img.parseXML(xmlPath,labels,7)\n imageList.append(img)\n\n return imageList\n\n#Prepare training data\ndef generate_batch_data(vocPath,imageNameFile,batch_size,sample_number):\n \"\"\"\n Args:\n vocPath: the path of pascal voc data\n imageNameFile: the path of the file of image names\n batchsize: batch size, sample_number should be divided by batchsize\n Funcs:\n A data generator generates training batch indefinitely\n \"\"\"\n class_num = 20\n #Read all the data once and dispatch them out as batches to save time\n TotalimageList = prepareBatch(0,sample_number,imageNameFile,vocPath)\n\n while 1:\n batches = sample_number // batch_size\n for i in range(batches):\n images = []\n boxes = []\n sample_index = np.random.choice(sample_number,batch_size,replace=True)\n #sample_index = [3]\n for ind in sample_index:\n image = TotalimageList[ind]\n #print image.imgPath\n image_array = crop_detection(image.imgPath,new_width=448,new_height=448)\n #image_array = np.expand_dims(image_array,axis=0)\n\n y = []\n for i in range(7):\n for j in range(7):\n box = image.boxes[i][j]\n '''\n ############################################################\n #x,y,h,w,one_hot class label vector[0....0],objectness{0,1}#\n ############################################################\n '''\n if(box.has_obj):\n obj = box.objs[0]\n\n y.append(obj.x)\n y.append(obj.y)\n y.append(obj.h)\n y.append(obj.w)\n\n labels = [0]*20\n labels[obj.class_num] = 1\n y.extend(labels)\n y.append(1) #objectness\n else:\n y.extend([0]*25)\n y = np.asarray(y)\n #y = np.reshape(y,[1,y.shape[0]])\n\n images.append(image_array)\n boxes.append(y)\n #return np.asarray(images),np.asarray(boxes)\n yield np.asarray(images),np.asarray(boxes)\n\nif __name__ == '__main__':\n imageNameFile='/home/media/Documents/YOLO.keras/dataset/train_val/SingleImageNameFile.txt'\n vocPath='/home/media/Documents/YOLO.keras/dataset/train_val'\n '''\n imageList = prepareBatch(0,2,imageNameFile,vocPath)\n for i in range(0,2):\n img = imageList[i]\n print img.imgPath\n boxes = img.boxes\n for i in range(7):\n for j in range(7):\n if(boxes[i][j].has_obj):\n print i,j\n objs = boxes[i][j].objs\n for obj in objs:\n print obj.class_num\n print obj.x\n print obj.y\n print\n '''\n image_array,y = generate_batch_data(vocPath,imageNameFile,1,sample_number=16)\n print image_array.shape,y.shape\n #print image_array[0,...,...,...].shape\n #let's see if we read correctly\n image_array = image_array[0,...,...,...]\n #scipy.misc.imsave('recovered.jpg', image_array)\n print image_array.shape\n image_array = (image_array + 1.0) / 2.0 * 225.0\n image_array = np.rollaxis(image_array,2,0)\n image_array = np.rollaxis(image_array,2,0)\n print image_array.shape\n\n scipy.misc.imsave('recovered.jpg', image_array)\n # center should be in (3,3)\n labels = [\"aeroplane\", \"bicycle\", \"bird\", \"boat\", \"bottle\", 
\"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\", \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\", \"sofa\", \"train\",\"tvmonitor\"]\n out = y[0]\n\n imgPath = os.path.join(os.getcwd(),'recovered.jpg')\n img = Image.open(imgPath)\n image_arr,img_resize = crop_detection(imgPath,448,448,save=True)\n drawable = ImageDraw.Draw(img_resize)\n #Draw orignal bounding boxes\n\n count = 0\n for i in range(49):\n preds = out[i*25:(i+1)*25]\n if(preds[24] > 0.3):\n count = count + 1\n #print preds[0:4],preds[24]\n row = i/7\n col = i%7\n print row,col\n centerx = 64 * col + 64 * preds[0]\n centery = 64 * row + 64 * preds[1]\n\n h = preds[2] * preds[2]\n h = h * 448.0\n w = preds[3] * preds[3]\n w = w * 448.0\n\n left = centerx - w/2.0\n right = centerx + w/2.0\n up = centery - h/2.0\n down = centery + h/2.0\n\n if(left < 0): left = 0\n if(right > 448): right = 447\n if(up < 0): up = 0\n if(down > 448): down = 447\n\n drawable.rectangle([left,up,right,down],outline='red')\n print 'Class is: ',labels[np.argmax(preds[4:24])]\n print np.max(preds[4:24])\n print count\n img_resize.save(os.path.join(os.getcwd(),'recover.jpg'))\n" }, { "alpha_fraction": 0.5038039088249207, "alphanum_fraction": 0.533389687538147, "avg_line_length": 24.659090042114258, "blob_id": "ea89e9f45bd722f5eb8bd075b1431308db853c48", "content_id": "c3711147bd4f934aacd60974baea0c95eb8aa7ba", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1183, "license_type": "permissive", "max_line_length": 90, "num_lines": 44, "path": "/C-HAWK/PIDController.py", "repo_name": "xjsxujingsong/CDTM-Deep-Learning-Drones", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Aug 10 11:19:19 2016\r\n\r\n@author: Christian\r\n\"\"\"\r\nimport time\r\n\r\nclass PID_Controller(object):\r\n\r\n \r\n k_p=1\r\n k_i=1\r\n k_d=1\r\n \r\n sum_value_difference=0\r\n old_value=-1\r\n old_time=-1\r\n kind_of_controller='Default'\r\n def __init__(self,k_p,k_i,k_d,kind_of_controller):\r\n self.k_p=k_p\r\n self.k_i=k_i\r\n self.k_d=k_d\r\n self.kind_of_controller=kind_of_controller\r\n\r\n def pidControl(self,desiredValue, actualValue):\r\n valueDifferenz= desiredValue - actualValue\r\n \r\n y_p=self.k_p*valueDifferenz\r\n \r\n self.sum_value_difference+=valueDifferenz\r\n y_i=self.k_i*self.sum_value_difference\r\n \r\n if self.old_value==-1:\r\n y_d = 0\r\n self.old_value = actualValue\r\n self.old_time = int(round(time.time() * 1000/250)) #time in number of frames \r\n else:\r\n actual_time = int(round(time.time() * 1000/250))\r\n y_d = self.k_d*(self.old_value-actualValue)\r\n self.old_value = actualValue\r\n self.old_time = actual_time\r\n \r\n return y_p+y_i+y_d\r\n \r\n " }, { "alpha_fraction": 0.6281406879425049, "alphanum_fraction": 0.6733668446540833, "avg_line_length": 21.11111068725586, "blob_id": "1f9cf2ca588fa103c8c69d1967841b36187723cf", "content_id": "bc587f613b753a6568a73913512ff10d0785f1ce", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 199, "license_type": "permissive", "max_line_length": 45, "num_lines": 9, "path": "/Yolonese/utils/readImgFile.py", "repo_name": "xjsxujingsong/CDTM-Deep-Learning-Drones", "src_encoding": "UTF-8", "text": "import numpy as np\nimport os\n\ndef readImg(imgPath,h=224,w=224):\n dt = np.dtype(\"float32\")\n testArray = np.fromfile(imgPath,dtype=dt)\n\n image = np.reshape(testArray,[3,h,w])\n return image\n" }, { 
"alpha_fraction": 0.4402855336666107, "alphanum_fraction": 0.4602361023426056, "avg_line_length": 35.42333221435547, "blob_id": "f8e40149ecf69132dbeebecdf50e3eda95ba0084", "content_id": "b213f57113ab8c439a1854520ab796186cef08c3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10927, "license_type": "permissive", "max_line_length": 124, "num_lines": 300, "path": "/Yolonese/drone.py", "repo_name": "xjsxujingsong/CDTM-Deep-Learning-Drones", "src_encoding": "UTF-8", "text": "import os\nimport libardrone.libardrone as libardrone\nimport time\nfrom threading import Thread, Lock, Condition\nimport cv2\nimport numpy\nimport keras\nfrom YOLO import SimpleNet, convert_yolo_detections, do_nms_sort\nfrom actuators import Actuator\nfrom utils.TinyYoloNet import ReadTinyYOLONetWeights\n\n\nclass YOLODrone(object):\n def __init__(self, manual=True):\n self.key = None\n self.stop = False\n self.mutex = None\n self.manuel = manual\n self.PID = None\n self.boxes = None\n self.condition = Condition()\n self.update = False\n self.contours = None\n self.boxes_update = False\n self.image = None\n\n self.labels = [\"aeroplane\", \"bicycle\", \"bird\", \"boat\", \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\",\n \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\", \"sofa\", \"train\", \"tvmonitor\"]\n yoloNet = ReadTinyYOLONetWeights(os.path.join(os.getcwd(), 'weights/yolo-tiny.weights'))\n # reshape weights in every layer\n for i in range(yoloNet.layer_number):\n l = yoloNet.layers[i]\n if (l.type == 'CONVOLUTIONAL'):\n weight_array = l.weights\n n = weight_array.shape[0]\n weight_array = weight_array.reshape((n // (l.size * l.size), (l.size * l.size)))[:, ::-1].reshape((n,))\n weight_array = numpy.reshape(weight_array, [l.n, l.c, l.size, l.size])\n l.weights = weight_array\n if (l.type == 'CONNECTED'):\n weight_array = l.weights\n weight_array = numpy.reshape(weight_array, [l.input_size, l.output_size])\n l.weights = weight_array\n\n self.model = SimpleNet(yoloNet)\n sgd = keras.optimizers.SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)\n self.model.compile(optimizer=sgd, loss='categorical_crossentropy')\n\n def start(self):\n self.drone = libardrone.ARDrone(True)\n self.drone.reset()\n\n if self.manuel:\n try:\n self.mutex = Lock()\n t1 = Thread(target=self.getKeyInput, args=())\n t2 = Thread(target=self.getVideoStream, args=())\n t3 = Thread(target=self.getBoundingBoxes, args=())\n t1.start()\n t2.start()\n t3.start()\n t1.join()\n t2.join()\n t3.join()\n except:\n print \"Error: unable to start thread\"\n else:\n try:\n self.mutex = Lock()\n t1 = Thread(target=self.autonomousFlight, args=(448, 448, 98, 0.1, self.labels,))\n t2 = Thread(target=self.getVideoStream, args=())\n t3 = Thread(target=self.getBoundingBoxes, args=())\n t1.start()\n t2.start()\n t3.start()\n t1.join()\n t2.join()\n t3.join()\n except:\n print \"Error: unable to start thread\"\n\n\n print(\"Shutting down...\")\n cv2.destroyAllWindows()\n self.drone.land()\n time.sleep(0.1)\n self.drone.halt()\n print(\"Ok.\")\n\n\n def getKeyInput(self):\n while not self.stop: # while 'bedingung true'\n time.sleep(0.1)\n\n\n if self.key == \"t\": # if 'bedingung true'\n self.drone.takeoff()\n elif self.key == \" \":\n self.drone.land()\n elif self.key == \"0\":\n self.drone.hover()\n elif self.key == \"w\":\n self.drone.move_forward()\n elif self.key == \"s\":\n self.drone.move_backward()\n elif self.key == \"a\":\n 
self.drone.move_left()\n elif self.key == \"d\":\n self.drone.move_right()\n elif self.key == \"q\":\n self.drone.turn_left()\n elif self.key == \"e\":\n self.drone.turn_right()\n elif self.key == \"8\":\n self.drone.move_up()\n elif self.key == \"2\":\n self.drone.move_down()\n elif self.key == \"c\":\n self.stop = True\n else:\n self.drone.hover()\n\n if self.key != \" \":\n self.key = \"\"\n\n def getVideoStream(self, img_width=448, img_height=448):\n while not self.stop:\n img = self.image\n if img != None:\n nav_data = self.drone.get_navdata()\n nav_data = nav_data[0]\n font = cv2.FONT_HERSHEY_SIMPLEX\n font_size = 0.5\n\n cv2.putText(img, 'Altitude: %.0f' % nav_data['altitude'], (5, 15), font, font_size, (255, 255, 255))\n cv2.putText(img, 'Battery: %.0f%%' % nav_data['battery'], (5, 30), font, font_size, (255, 255, 255))\n\n cv2.drawContours(img, self.contours, -1, (0, 255, 0), 3)\n thresh = 0.2\n self.mutex.acquire()\n if self.boxes_update:\n self.boxes_update = False\n for b in self.boxes:\n max_class = numpy.argmax(b.probs)\n prob = b.probs[max_class]\n if (prob > thresh and self.labels[max_class] == \"person\"):\n left = (b.x - b.w / 2.) * img_width\n right = (b.x + b.w / 2.) * img_width\n\n top = (b.y - b.h / 2.) * img_height\n bot = (b.y + b.h / 2.) * img_height\n\n cv2.rectangle(img, (int(left), int(top)), (int(right), int(bot)), (0, 0, 255), 3)\n self.mutex.release()\n cv2.imshow('frame', img)\n\n l = cv2.waitKey(150)\n if l < 0:\n self.key = \"\"\n else:\n self.key = chr(l)\n if self.key == \"c\":\n self.stop = True\n\n def variance_of_laplacian(self, image):\n # compute the Laplacian of the image and then return the focus\n # measure, which is simply the variance of the Laplacian\n return cv2.Laplacian(image, cv2.CV_64F).var()\n\n def getBoundingBoxes(self):\n newest = time.time()\n while not self.stop:\n try:\n pixelarray = self.drone.get_image()\n pixelarray = cv2.cvtColor(pixelarray, cv2.COLOR_BGR2RGB)\n\n # Check for Blurry\n gray = cv2.cvtColor(pixelarray, cv2.COLOR_RGB2GRAY)\n fm = self.variance_of_laplacian(gray)\n if fm < 10:\n continue\n\n if pixelarray != None:\n # ima = pixelarray[120:540]\n ima = cv2.resize(pixelarray, (448, 448))\n\n image = cv2.cvtColor(ima, cv2.COLOR_RGB2BGR)\n\n image = numpy.rollaxis(image, 2, 0)\n image = image / 255.0\n image = image * 2.0 - 1.0\n image = numpy.expand_dims(image, axis=0)\n\n out = self.model.predict(image)\n predictions = out[0]\n boxes = convert_yolo_detections(predictions)\n\n self.mutex.acquire()\n self.boxes = do_nms_sort(boxes, 98)\n self.image = ima\n self.update = True\n self.mutex.release()\n\n except:\n pass\n\n def autonomousFlight(self, img_width, img_height, num, thresh, labels):\n actuator = Actuator(self.drone, img_width, img_width * 0.5)\n\n print self.drone.navdata\n while not self.stop:\n if self.update == True:\n self.update = False\n\n hsv = cv2.cvtColor(self.image, cv2.COLOR_BGR2HSV)\n image = cv2.medianBlur(hsv, 3)\n\n # Filter by color red\n lower_red_1 = numpy.array([15, 150, 150])\n upper_red_1 = numpy.array([35, 255, 255])\n\n image = cv2.inRange(image, lower_red_1, upper_red_1)\n\n # Put on median blur to reduce noise\n image = cv2.medianBlur(image, 11)\n\n # Find contours and decide if hat is one of them\n contours, hierarchy = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n\n self.contours = contours\n\n boxes = self.boxes\n\n best_prob = -99999\n best_box = -1\n best_contour = None\n\n self.mutex.acquire()\n for i in range(num):\n # for each box, find the class with 
maximum prob\n max_class = numpy.argmax(boxes[i].probs)\n prob = boxes[i].probs[max_class]\n\n temp = boxes[i].w\n boxes[i].w = boxes[i].h\n boxes[i].h = temp\n\n if prob > thresh and labels[max_class] == \"person\":\n\n for contour in contours:\n x, y, w, h = cv2.boundingRect(contour)\n\n left = (boxes[i].x - boxes[i].w / 2.) * img_width\n right = (boxes[i].x + boxes[i].w / 2.) * img_width\n\n top = (boxes[i].y - boxes[i].h / 2.) * img_height\n bot = (boxes[i].y + boxes[i].h / 2.) * img_height\n\n if not (x + w < left or right < x or y + h < top or bot < y):\n if best_prob < prob and w > 30:\n print \"prob found\"\n best_prob = prob\n best_box = i\n best_contour = contour\n\n self.boxes_update = True\n if best_box < 0:\n # print \"No Update\"\n self.mutex.release()\n self.drone.at(libardrone.at_pcmd, False, 0, 0, 0, 0)\n continue\n\n b = boxes[best_box]\n\n left = (b.x - b.w / 2.) * img_width\n right = (b.x + b.w / 2.) * img_width\n\n top = (b.y - b.h / 2.) * img_height\n bot = (b.y + b.h / 2.) * img_height\n\n\n if (left < 0): left = 0;\n if (right > img_width - 1): right = img_width - 1;\n if (top < 0): top = 0;\n if (bot > img_height - 1): bot = img_height - 1;\n\n width = right - left\n height = bot - top\n x, y, w, h = cv2.boundingRect(best_contour)\n\n actuator.step(right - width/2., width)\n self.mutex.release()\n\n\ndef main():\n drone = YOLODrone(manual=False)\n drone.start()\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5240073800086975, "alphanum_fraction": 0.552169919013977, "avg_line_length": 31.353845596313477, "blob_id": "a8d05a633e178356952a474e6acab971d9fbbac4", "content_id": "c8ce114141d9f36ab56150eaf49be15b243af2cc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2166, "license_type": "permissive", "max_line_length": 96, "num_lines": 65, "path": "/C-HAWK/patternRecognition.py", "repo_name": "xjsxujingsong/CDTM-Deep-Learning-Drones", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\r\n\"\"\"Pattern recognition\r\n\r\nThis file provides functions to find well-defined patterns in an image\r\n\"\"\"\r\nimport cv2\r\n\r\ndef logFileWrite(file,msg):\r\n pass\r\n# file.write(\"%s,%s\\n\" % (str(time.time()), msg))\r\n\r\ndef cornerPointsChess(img,logFile):\r\n \"\"\"Find chessboard in image\r\n \r\n Args:\r\n img: An openCV image ndarray in a grayscale or color format.\r\n logFile: File handler for logfile.\r\n \"\"\"\r\n NBR_COLUMNS = 3\r\n NBR_ROWS = 3\r\n \r\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) \r\n ret, corners = cv2.findChessboardCorners(gray, (NBR_COLUMNS,NBR_ROWS),None)\r\n \r\n xalt=[]\r\n yalt=[]\r\n sumV=[]\r\n \r\n if ret == True:\r\n# print \"Chessboard found!\"\r\n logFileWrite(logFile,\"Chessboard found!\")\r\n cv2.imwrite('testewr.png',img)\r\n #Find left-top corner value && right-bottom corner value\r\n xalt.append(round(corners[0][0][0]))\r\n yalt.append(round(corners[0][0][1]))\r\n xalt.append(round(corners[NBR_COLUMNS-1][0][0]))\r\n yalt.append(round(corners[NBR_COLUMNS-1][0][1]))\r\n xalt.append(round(corners[NBR_COLUMNS*NBR_ROWS-1][0][0]))\r\n yalt.append(round(corners[NBR_COLUMNS*NBR_ROWS-1][0][1]))\r\n xalt.append(round(corners[(NBR_COLUMNS-1)*NBR_ROWS][0][0]))\r\n yalt.append(round(corners[(NBR_COLUMNS-1)*NBR_ROWS][0][1])) \r\n for i in range(0, 4):\r\n sumV.append(xalt[i] + yalt[i])\r\n minV = min(sumV)\r\n maxV = max(sumV)\r\n logFileWrite(logFile,\"Sum: \"+str(sumV))\r\n for i in range(0, 4):\r\n if minV==sumV[i]:\r\n x1 = xalt[i]; y1 = 
yalt[i]\r\n if maxV==sumV[i]:\r\n x2 = xalt[i]; y2 = yalt[i]\r\n \r\n logFileWrite(logFile,\"Endpoints: (\"+str(x1)+\",\"+str(y1)+\") ; (\"+str(x2)+\",\"+str(y2)+\")\")\r\n # Draw and display the corners (ADD FRAMES)\r\n cv2.drawChessboardCorners(img, (NBR_COLUMNS,NBR_ROWS), corners,ret)\r\n\r\n else:\r\n logFileWrite(logFile,\"Chessboard not found!\")\r\n x1=-1; y1=-1; x2=-1; y2=-1\r\n \r\n cv2.imshow('img',img)\r\n cv2.waitKey(1)\r\n \r\n return x1,y1,x2,y2" }, { "alpha_fraction": 0.545866072177887, "alphanum_fraction": 0.5966166257858276, "avg_line_length": 38.59434127807617, "blob_id": "f38e5a14fcd0c6807abd125d233ed6b4b50aaf96", "content_id": "4167ee2edd9ce21bbf21fb76853a878a98730cd5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4197, "license_type": "permissive", "max_line_length": 110, "num_lines": 106, "path": "/Yolonese/utils/TinyYoloNetforLoc.py", "repo_name": "xjsxujingsong/CDTM-Deep-Learning-Drones", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom enum import Enum\nimport os\n\nclass layer:\n def __init__(self,size,c,n,h,w,type):\n self.size = size\n self.c = c\n self.n = n\n self.h = h\n self.w = w\n self.type = type\n\nclass convolutional_layer(layer):\n def __init__(self,size,c,n,h,w):\n layer.__init__(self,size,c,n,h,w,\"CONVOLUTIONAL\")\n self.biases = np.zeros(n)\n self.weights = np.zeros((size*size,c,n))\n\nclass connected_layer(layer):\n def __init__(self,size,c,n,h,w,input_size,output_size):\n layer.__init__(self,size,c,n,h,w,\"CONNECTED\")\n self.output_size = output_size\n self.input_size = input_size\n self.biases = np.zeros(output_size)\n self.weights = np.zeros((output_size*input_size))\n\nclass Tiny_YOLO:\n layers = []\n layer_number = 22\n def __init__(self):\n self.layers.append(layer(0,0,0,0,0,\"CROP\"))\n self.layers.append(convolutional_layer(3,3,16,448,448))\n self.layers.append(layer(0,0,0,0,0,\"MAXPOOL\"))\n self.layers.append(convolutional_layer(3,16,32,224,224))\n self.layers.append(layer(0,0,0,0,0,\"MAXPOOL\"))\n self.layers.append(convolutional_layer(3,32,64,112,112))\n self.layers.append(layer(0,0,0,0,0,\"MAXPOOL\"))\n self.layers.append(convolutional_layer(3,64,128,56,56))\n self.layers.append(layer(0,0,0,0,0,\"MAXPOOL\"))\n self.layers.append(convolutional_layer(3,128,256,28,28))\n self.layers.append(layer(0,0,0,0,0,\"MAXPOOL\"))\n self.layers.append(convolutional_layer(3,256,512,14,14))\n self.layers.append(layer(0,0,0,0,0,\"MAXPOOL\"))\n self.layers.append(convolutional_layer(3,512,1024,7,7))\n self.layers.append(convolutional_layer(3,1024,1024,7,7))\n self.layers.append(convolutional_layer(3,1024,1024,7,7))\n self.layers.append(layer(0,0,0,0,0,\"FLATTEN\"))\n self.layers.append(connected_layer(0,0,0,0,0,50176,256))\n self.layers.append(connected_layer(0,0,0,0,0,256,4096))\n self.layers.append(layer(0,0,0,0,0,\"DROPOUT\"))\n self.layers.append(layer(0,0,0,0,0,\"LEAKY\"))\n self.layers.append(connected_layer(0,0,0,0,0,4096,49))\n\ndef ReadTinyYOLONetWeights(weight_path):\n YOLO = Tiny_YOLO()\n type_string = \"(3)float32,i4,\"\n for i in range(YOLO.layer_number):\n l = YOLO.layers[i]\n if(l.type == \"CONVOLUTIONAL\"):\n bias_number = l.n\n weight_number = l.n*l.c*l.size*l.size\n type_string = type_string +\"(\"+ str(bias_number) + \")float32,(\" + str(weight_number) + \")float32,\"\n elif(l.type == \"CONNECTED\"):\n bias_number = l.output_size\n weight_number = l.output_size * l.input_size\n type_string = type_string + \"(\"+ str(bias_number) + \")float32,(\"+ 
str(weight_number)+\")float32\"\n if(i != YOLO.layer_number-1):\n type_string = type_string + \",\"\n #dt = np.dtype((+str(64)+\")float32\"))\n #type_string = type_string + \",i1\"\n dt = np.dtype(type_string)\n testArray = np.fromfile(weight_path,dtype=dt)\n #write the weights read from file to GoogleNet biases and weights\n\n count = 2\n for i in range(0,YOLO.layer_number):\n l = YOLO.layers[i]\n if(l.type == \"CONVOLUTIONAL\" or l.type == \"CONNECTED\"):\n l.biases = np.asarray(testArray[0][count])\n count = count + 1\n l.weights = np.asarray(testArray[0][count])\n count = count + 1\n YOLO.layers[i] = l\n\n #write back to file and see if it is the same\n '''\n write_fp = open('reconstruct.weights','w')\n write_fp.write((np.asarray(testArray[0][0])).tobytes())\n write_fp.write((np.asarray(testArray[0][1])).tobytes())\n for i in range(0,YOLO.layer_number):\n l = YOLO.layers[i]\n if(l.type == \"CONVOLUTIONAL\" or l.type == \"CONNECTED\"):\n write_fp.write(l.biases.tobytes())\n write_fp.write(l.weights.tobytes())\n\n\n write_fp.close()\n '''\n return YOLO\n\nif __name__ == '__main__':\n YOLO = ReadTinyYOLONetWeights('/home/xuetingli/Documents/YOLO.keras/weights/yolo-tiny.weights')\n for i in range(YOLO.layer_number):\n l = YOLO.layers[i]\n print l.type\n" }, { "alpha_fraction": 0.5528775453567505, "alphanum_fraction": 0.5929660797119141, "avg_line_length": 37.72380828857422, "blob_id": "865f54fab95f726fb1826ce335abfb5e83d1c94e", "content_id": "ec77b58dcfd775f29908c580e59f858f03c33dac", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4066, "license_type": "permissive", "max_line_length": 110, "num_lines": 105, "path": "/Yolonese/utils/DarkNet.py", "repo_name": "xjsxujingsong/CDTM-Deep-Learning-Drones", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom enum import Enum\nimport os\n\nclass layer:\n def __init__(self,size,c,n,h,w,type):\n self.size = size\n self.c = c\n self.n = n\n self.h = h\n self.w = w\n self.type = type\n\nclass convolutional_layer(layer):\n def __init__(self,size,c,n,h,w):\n layer.__init__(self,size,c,n,h,w,\"CONVOLUTIONAL\")\n self.biases = np.zeros(n)\n self.weights = np.zeros((size*size,c,n))\n\nclass connected_layer(layer):\n def __init__(self,size,c,n,h,w,input_size,output_size):\n layer.__init__(self,size,c,n,h,w,\"CONNECTED\")\n self.output_size = output_size\n self.input_size = input_size\n\nclass DarkNet:\n layers = []\n layer_number = 18\n def __init__(self):\n self.layers.append(layer(0,0,0,0,0,\"CROP\"))\n self.layers.append(convolutional_layer(3,3,16,224,224))\n self.layers.append(layer(0,0,0,0,0,\"MAXPOOL\"))\n self.layers.append(convolutional_layer(3,16,32,112,112))\n self.layers.append(layer(0,0,0,0,0,\"MAXPOOL\"))\n self.layers.append(convolutional_layer(3,32,64,56,56))\n self.layers.append(layer(0,0,0,0,0,\"MAXPOOL\"))\n self.layers.append(convolutional_layer(3,64,128,28,28))\n self.layers.append(layer(0,0,0,0,0,\"MAXPOOL\"))\n self.layers.append(convolutional_layer(3,128,256,14,14))\n self.layers.append(layer(0,0,0,0,0,\"MAXPOOL\"))\n self.layers.append(convolutional_layer(3,256,512,7,7))\n self.layers.append(layer(0,0,0,0,0,\"MAXPOOL\"))\n self.layers.append(convolutional_layer(3,512,1024,4,4))\n self.layers.append(layer(0,0,0,0,0,\"AVGPOOL\"))\n self.layers.append(connected_layer(0,0,0,0,0,1024,1000))\n self.layers.append(layer(0,0,0,0,0,\"SOFTMAX\"))\n self.layers.append(layer(0,0,0,0,0,\"COST\"))\n\ndef ReadDarkNetWeights(weight_path):\n darkNet = DarkNet()\n type_string = 
\"(3)float32,i4,\"\n for i in range(darkNet.layer_number):\n l = darkNet.layers[i]\n if(l.type == \"CONVOLUTIONAL\"):\n bias_number = l.n\n weight_number = l.n*l.c*l.size*l.size\n type_string = type_string +\"(\"+ str(bias_number) + \")float32,(\" + str(weight_number) + \")float32,\"\n elif(l.type == \"CONNECTED\"):\n bias_number = l.output_size\n weight_number = l.output_size * l.input_size\n type_string = type_string + \"(\"+ str(bias_number) + \")float32,(\"+ str(weight_number)+\")float32\"\n #dt = np.dtype((+str(64)+\")float32\"))\n #type_string = type_string + \",i1\"\n dt = np.dtype(type_string)\n testArray = np.fromfile(weight_path,dtype=dt)\n #write the weights read from file to GoogleNet biases and weights\n\n count = 2\n for i in range(0,darkNet.layer_number):\n l = darkNet.layers[i]\n if(l.type == \"CONVOLUTIONAL\" or l.type == \"CONNECTED\"):\n l.biases = np.asarray(testArray[0][count])\n count = count + 1\n l.weights = np.asarray(testArray[0][count])\n count = count + 1\n darkNet.layers[i] = l\n if(l.type == 'CONNECTED'):\n weight_array = l.weights\n weight_array = np.reshape(weight_array,[l.input_size,l.output_size])\n weight_array = weight_array.transpose()\n #print i,count\n\n #write back to file and see if it is the same\n\n write_fp = open('reconstruct.weights','w')\n write_fp.write((np.asarray(testArray[0][0])).tobytes())\n write_fp.write((np.asarray(testArray[0][1])).tobytes())\n for i in range(0,darkNet.layer_number):\n l = darkNet.layers[i]\n if(l.type == \"CONVOLUTIONAL\" or l.type == \"CONNECTED\"):\n write_fp.write(l.biases.tobytes())\n write_fp.write(l.weights.tobytes())\n\n\n write_fp.close()\n\n return darkNet\n\nif __name__ == '__main__':\n darkNet = ReadDarkNetWeights('/home/xuetingli/Documents/YOLO.keras/weights/darknet.weights')\n for i in range(darkNet.layer_number):\n l = darkNet.layers[i]\n print l.type\n if(l.type == 'CONNECTED'):\n print l.weights.shape\n" }, { "alpha_fraction": 0.7746591567993164, "alphanum_fraction": 0.7762630581855774, "avg_line_length": 67.27777862548828, "blob_id": "e734b6bb5130a1533a204365d1c3f4d36953e797", "content_id": "dbada8466435b5e1700d3e06922bb4a99ccfa21f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2494, "license_type": "permissive", "max_line_length": 218, "num_lines": 36, "path": "/C-HAWK/README.md", "repo_name": "xjsxujingsong/CDTM-Deep-Learning-Drones", "src_encoding": "UTF-8", "text": "# C-HAWK\r\n\r\nThis drone control mainly uses the libraries OpenCV and [libardrone](https://github.com/venthur/python-ardrone).\r\nThe drone will follow a chessboard. 
For this function the [chessboard recognition](http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_calib3d/py_calibration/py_calibration.html) from OpenCV is used.\r\nAfter the drone has detected the chessboard, it computes the middle of the board and the length of the diagonal of the board.\r\nThese informations are put into three different PD-Controller ([PID-Controller](https://en.wikipedia.org/wiki/PID_controller) with the I-constant equal to 0).\r\nThereby three \"directions\" are optimised:\r\n* x-coordinate -> left/right-control\r\n* y-coordinate -> height-control\r\n* length of diagonale -> backwards/forwards-control\r\n\r\n## To run the program:\r\nYou have to download the GitHub project [libardrone](https://github.com/venthur/python-ardrone).\r\nThen put the files from this project into the libardrone folder.\r\nConnect your laptop to the drone and run CentralControl.py.\r\nFor takeoff press any key. \r\nYou should be able to see the front camera livestream and if the drone detects a chessboard, you will see a marking in the livestream.\r\nNow the drone should try to follow the chessboard.\r\nFor landing and shutting down the drone press space.\r\n\r\n## Overview over the included files:\r\n* CentralControl.py:\r\n + the drone is started\r\n + values of the patternRecognition are received, interpreted and given to the three PD-Controllers\r\n + values of the PD-Controllers are received, interpreted and used to calculate the speeds and directions for actuating the desired movements\r\n + the drone is landed and shut down\r\n* PIDController.py: a normal implementation of a PID-Controller where the constants are set via parameters\r\n* Testprotocol.txt: some example values for speed settings which worked well\r\n* patternRecognition.py:\r\n + function that detects a (n x m) chessboard in an image and returns the coordinates of the left upper corner and the right bottom corner\r\n + the function uses 'findChessboardCorners' of the openCV library to find the chessboard\r\n + the chessboard is marked in the original image and is shown in a seperate window\r\n* schachbrettmuster.jpg: example chessboard (the code expects this chessboard size, but you can configure patternRecognition.py if you want to use another chessboard size)\r\n* schachmuster_5x5.jpg: example chessboard\r\n\r\n**_Authors:_** Christian Gebhardt and Christian Münch\r\n" }, { "alpha_fraction": 0.5075885057449341, "alphanum_fraction": 0.5311973094940186, "avg_line_length": 33.882354736328125, "blob_id": "22cb882980437c09b27b966e5974794ba93ff2bd", "content_id": "8d2dc3713da6332bcb67ef151856bcd78068b4a9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 593, "license_type": "permissive", "max_line_length": 106, "num_lines": 17, "path": "/Yolonese/PID.py", "repo_name": "xjsxujingsong/CDTM-Deep-Learning-Drones", "src_encoding": "UTF-8", "text": "class PID(object):\n def __init__(self, K_p=0.4, K_d=0.00, K_i=0.00, dt=0.5):\n self.K_p = K_p\n self.K_d = K_d\n self.K_i = K_i\n self.dt = dt\n self.w = 0\n self.velocity = 0\n self.errorsum = 0\n self.actual_previous = 0\n\n def step(self, desired, actual):\n self.errorsum = (desired - actual) * self.dt\n self.velocity = (actual - self.actual_previous) / self.dt\n u = self.K_p * (desired - actual) + self.K_d * (self.w - self.velocity) + self.K_i * self.errorsum\n self.actual_previous = actual\n return u\n" }, { "alpha_fraction": 0.45633038878440857, "alphanum_fraction": 0.49729180335998535, 
"avg_line_length": 29.77083396911621, "blob_id": "91048ca7db062f20f1e9b22920112d6e309de8ff", "content_id": "5763c9158d0ab8bfdfd5d93c6505acede6c40dc6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2954, "license_type": "permissive", "max_line_length": 76, "num_lines": 96, "path": "/Yolonese/utils/MeasureAccuray.py", "repo_name": "xjsxujingsong/CDTM-Deep-Learning-Drones", "src_encoding": "UTF-8", "text": "from keras.models import model_from_json\nimport theano.tensor as T\n\nfrom utils.readImgFile import readImg\nfrom utils.crop import crop_detection\nfrom utils.ReadPascalVoc2 import prepareBatch\n\nimport os\nimport numpy as np\n\ndef Acc(imageList,model,sample_number=5000,thresh=0.3):\n correct = 0\n object_num = 0\n\n count = 0\n for image in imageList:\n\tcount += 1\n #Get prediction from neural network\n img = crop_detection(image.imgPath,new_width=448,new_height=448)\n img = np.expand_dims(img, axis=0)\n out = model.predict(img)\n out = out[0]\n\n for i in range(49):\n preds = out[i*25:(i+1)*25]\n if(preds[24] > thresh):\n object_num += 1\n \trow = i/7\n \tcol = i%7\n '''\n \tcenterx = 64 * col + 64 * preds[0]\n \tcentery = 64 * row + 64 * preds[1]\n\n \th = preds[2] * preds[2]\n \th = h * 448.0\n \tw = preds[3] * preds[3]\n \tw = w * 448.0\n\n \tleft = centerx - w/2.0\n \tright = centerx + w/2.0\n \tup = centery - h/2.0\n \tdown = centery + h/2.0\n\n \tif(left < 0): left = 0\n \tif(right > 448): right = 447\n \tif(up < 0): up = 0\n \tif(down > 448): down = 447\n '''\n \tclass_num = np.argmax(preds[4:24])\n\n #Ground Truth\n box = image.boxes[row][col]\n if(box.has_obj):\n for obj in box.objs:\n true_class = obj.class_num\n if(true_class == class_num):\n correct += 1\n\t break\n\n\n return correct*1.0/object_num\n\ndef Recall(imageList,model,sample_number=5000,thresh=0.3):\n correct = 0\n obj_num = 0\n count = 0\n for image in imageList:\n count += 1\n #Get prediction from neural network\n img = crop_detection(image.imgPath,new_width=448,new_height=448)\n img = np.expand_dims(img, axis=0)\n out = model.predict(img)\n out = out[0]\n #for each ground truth, see we have predicted a corresponding result\n for i in range(49):\n preds = out[i*25:i*25+25]\n row = i/7\n col = i%7\n box = image.boxes[row][col]\n if(box.has_obj):\n for obj in box.objs:\n obj_num += 1\n true_class = obj.class_num\n #see what we predict\n if(preds[24] > thresh):\n predcit_class = np.argmax(preds[4:24])\n if(predcit_class == true_class):\n correct += 1\n return correct*1.0/obj_num\n\ndef MeasureAcc(model,sample_number,vocPath,imageNameFile):\n imageList = prepareBatch(0,sample_number,imageNameFile,vocPath)\n acc = Acc(imageList,model)\n re = Recall(imageList,model)\n\n return acc,re\n" } ]
20
glitton/personal-webpage
https://github.com/glitton/personal-webpage
6cc03b0e6439cbef1ca2ce57ea431f4c38c17cea
9e324bf44b547503e808e25276e16906e3c7b135
2be2daf0b79f40bfef88b5a91f72f07bb7029396
refs/heads/master
2021-07-07T03:18:46.387107
2021-05-13T06:27:04
2021-05-13T06:27:04
90,499,574
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4536784887313843, "alphanum_fraction": 0.4536784887313843, "avg_line_length": 24.75438690185547, "blob_id": "819f817c19871f4eb7c2b87cc5fd3320aff8e6f2", "content_id": "8210008cb1d9e721eba5414d5eb23301bc46789f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1468, "license_type": "no_license", "max_line_length": 54, "num_lines": 57, "path": "/static/js/portfolio.js", "repo_name": "glitton/personal-webpage", "src_encoding": "UTF-8", "text": "\"use strict\"\n\n$(document).ready(function(){\n // Stops YouTube video when modal window is closed\n\n // PAWS video\n $('.close-paws').on('click', function(){\n var video = $('#paws').attr('src');\n $('#paws').attr('src','');\n $('#paws').attr('src', video);\n }\n )\n\n // Hitachi Oracle video\n $('.close-oracle').on('click', function(){\n var video = $('#oracle').attr('src');\n $('#oracle').attr('src','');\n $('#oracle').attr('src', video);\n }\n )\n\n // Microgrids video\n $('.close-microgrids').on('click', function(){\n var video = $('#microgrids').attr('src');\n $('#microgrids').attr('src','');\n $('#microgrids').attr('src', video);\n }\n )\n\n // Analytics video\n $('.close-analytics').on('click', function(){\n var video = $('#analytics').attr('src');\n $('#analytics').attr('src','');\n $('#analytics').attr('src', video);\n }\n )\n\n // Whale shark video\n $('.close-whale-shark').on('click', function(){\n var video = $('#whale-shark').attr('src');\n $('#whale-shark').attr('src','');\n $('#whale-shark').attr('src', video);\n }\n )\n\n // Sea lions video\n $('.close-sea-lions').on('click', function(){\n var video = $('#sea-lions').attr('src');\n $('#sea-lions').attr('src','');\n $('#sea-lions').attr('src', video);\n }\n ) \n\n\n\n\n});\n" }, { "alpha_fraction": 0.6992263197898865, "alphanum_fraction": 0.7050290107727051, "avg_line_length": 24.850000381469727, "blob_id": "07d08535d62b1d0a85c74b0a73e6d7ee33288db6", "content_id": "8acbfaf2216208cd587082ece680ab1f226d7157", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1034, "license_type": "no_license", "max_line_length": 84, "num_lines": 40, "path": "/server.py", "repo_name": "glitton/personal-webpage", "src_encoding": "UTF-8", "text": "\"\"\"generosalitton.com web page\"\"\"\n\nfrom jinja2 import StrictUndefined\n\nfrom flask import Flask, render_template, request, flash, redirect, session, jsonify\nfrom flask_debugtoolbar import DebugToolbarExtension\n\n\napp = Flask(__name__)\n\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n# Required to use Flask sessions and the debug toolbar\napp.secret_key = \"9js8jl5kK#HoaesjflksjRGkjsadfdiAuOiGHeoPkSaskdn760kA!\"\n\n# Normally, if you use an undefined variable in Jinja2, it fails silently.\n# This is horrible. Fix this so that, instead, it raises an error.\napp.jinja_env.undefined = StrictUndefined\n\n\[email protected](\"/\")\ndef index():\n \"\"\"Homepage.\"\"\"\n \n return render_template('index.html')\n\n#route for sending me an email, learn flask mail\n\n\nif __name__ == \"__main__\":\n # We have to set debug=True here, since it has to be True at the point\n # that we invoke the DebugToolbarExtension\n\n # Do not debug for demo\n app.debug = True\n app.jinja_env.auto_reload = app.debug\n\n # Use the DebugToolbar\n DebugToolbarExtension(app)\n app.run(host=\"0.0.0.0\")\n" } ]
2
shreyanvaidya/Python
https://github.com/shreyanvaidya/Python
5339df6660237f0156fa8d4fcca87e10a61ced1c
43674acdc6779a809c4999f759a06d30619ce0e0
1c47d9e3c025d76aa2a921300fc93482ea68ff8a
refs/heads/main
2023-03-23T20:49:01.153866
2021-03-20T17:43:38
2021-03-20T17:43:38
349,794,791
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5071749091148376, "alphanum_fraction": 0.5224215388298035, "avg_line_length": 38.54545593261719, "blob_id": "72b505c8af9b1251961bbd5849e4dc1e85981c32", "content_id": "1f9e24c8016afb99c50025600384aebea1eb7889", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2230, "license_type": "no_license", "max_line_length": 113, "num_lines": 55, "path": "/RockPaperScissors_Game.py", "repo_name": "shreyanvaidya/Python", "src_encoding": "UTF-8", "text": "import random\r\n\r\nwhile True:\r\n best_of = int(input(\"Enter the times you want to play\"))\r\n draw_count = 0\r\n win_count = 0\r\n loss_count = 0\r\n\r\n # function to count the winner\r\n def winner(win_count, draw_count, loss_count):\r\n if win_count == loss_count:\r\n return \"\\nGame is drawn\"\r\n elif win_count > draw_count and win_count > loss_count:\r\n return \"\\nPlayer wins\"\r\n elif draw_count > win_count and draw_count > loss_count:\r\n return \"\\nGame is drawn\"\r\n else:\r\n return \"\\nComputer wins\"\r\n\r\n\r\n if best_of % 2 == 0:\r\n print(\"\\nenter an odd number\")\r\n continue\r\n elif best_of % 2 != 0 and best_of > 2:\r\n game_round = 1\r\n while game_round <= best_of:\r\n user_choice = int(input('\\nenter your choice\\n1. for rock\\n2. for paper\\n3. for scissors \\n'))\r\n choice_dict = {1: 'Rock', 2: 'Paper', 3: 'Scissors'}\r\n winning_list = [(3, 2), (1, 3), (2, 1)]\r\n list1 = [1, 2, 3]\r\n computer_choice = random.choice(list1)\r\n if user_choice not in list1:\r\n print('enter valid number!!!\\n 1,2 or 3')\r\n continue\r\n print('\\ncomputer choice is ' + str(choice_dict[computer_choice]))\r\n print('\\nplayer choice is ' + str(choice_dict[user_choice]))\r\n if user_choice == computer_choice:\r\n draw_count += 1\r\n print('\\nGame is draw for round: ' + str(game_round))\r\n elif (user_choice, computer_choice) in winning_list:\r\n win_count += 1\r\n print('\\nplayer wins for round: ' + str(game_round))\r\n else:\r\n loss_count += 1\r\n print('\\ncomputer wins for round: ' + str(game_round))\r\n game_round += 1\r\n else:\r\n print(\"\\nenter a valid choice\")\r\n continue\r\n print(\"player won= \" + str(win_count) + \"\\ncomputer won= \" + str(loss_count) + \"\\ndrawn= \" + str(draw_count))\r\n print(winner(win_count, draw_count, loss_count))\r\n play_again = input('\\ndo you want to play again? \\n(y/n)\\n')\r\n if play_again.lower() != 'y':\r\n print('\\nthank you for playing!')\r\n break\r\n" } ]
1
AshutoshSwamy/Correlation
https://github.com/AshutoshSwamy/Correlation
7bf99533540cb2bec67a0aeee441a65480bc68b9
cbf88841e11791dd3b90945d3a2acf0ba1366c50
76ffeb885826a78d2ba211b823634721afc052b1
refs/heads/main
2023-03-04T04:40:00.955825
2021-02-21T13:44:59
2021-02-21T13:44:59
340,911,343
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6515151262283325, "alphanum_fraction": 0.6565656661987305, "avg_line_length": 26.571428298950195, "blob_id": "5c4ece245026afd1cfbe067ea8b48b97813a9a68", "content_id": "4cec0e212c33ea51c6ff78a5975fa358b4caf82a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 198, "license_type": "no_license", "max_line_length": 69, "num_lines": 7, "path": "/main2.py", "repo_name": "AshutoshSwamy/Correlation", "src_encoding": "UTF-8", "text": "import plotly.express as px\r\nimport csv\r\nwith open(\"./data2.csv\") as csv_file:\r\n df = csv.DictReader(csv_file)\r\n fig = px.scatter(df , x=\"Marks In Percentage\" , y=\"Days Present\")\r\n\r\nfig.show()" }, { "alpha_fraction": 0.5925433039665222, "alphanum_fraction": 0.5965379476547241, "avg_line_length": 23.03333282470703, "blob_id": "a1e7623508c2e2b765022194240edcbbb55132a7", "content_id": "6351b9b0d139f1bc5c4c3617cfb48b86fcb0b197", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 751, "license_type": "no_license", "max_line_length": 68, "num_lines": 30, "path": "/main3.py", "repo_name": "AshutoshSwamy/Correlation", "src_encoding": "UTF-8", "text": "import csv\r\nimport numpy as np\r\n\r\n\r\ndef getDataSource(dataPath):\r\n cups_of_coffee = []\r\n hours_of_sleep = []\r\n with open(dataPath) as csv_file:\r\n df = csv.DictReader(csv_file)\r\n for row in df:\r\n cups_of_coffee.append(float(row[\"Coffee in ml\"]))\r\n hours_of_sleep.append(int(row[\"sleep in hours\"]))\r\n return {\r\n \"x\": cups_of_coffee, \"y\": hours_of_sleep\r\n }\r\n\r\n\r\ndef findCorrelation(dataSource):\r\n correlation = np.corrcoef(dataSource[\"x\"], dataSource[\"y\"])\r\n print(\"Correlation between Cups of Coffee & Hours of Sleep :\\t\",\r\n correlation[0, 1])\r\n\r\n\r\ndef main():\r\n dataPath = \"./data3.csv\"\r\n dataSource = getDataSource(dataPath)\r\n findCorrelation(dataSource)\r\n\r\n\r\nmain()\r\n" }, { "alpha_fraction": 0.6464646458625793, "alphanum_fraction": 0.6464646458625793, "avg_line_length": 26.571428298950195, "blob_id": "d974d056dd4fe6ab46c5fd7ca398990b0b3130dd", "content_id": "ec343533d204c9aa3df441ea19ef21baf8cbb096", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 198, "license_type": "no_license", "max_line_length": 70, "num_lines": 7, "path": "/main.py", "repo_name": "AshutoshSwamy/Correlation", "src_encoding": "UTF-8", "text": "import plotly.express as px\r\nimport csv\r\nwith open(\"./data.csv\") as csv_file:\r\n df = csv.DictReader(csv_file)\r\n fig = px.scatter(df , x=\"Size of TV\" , y=\"Average Weekly TV Time\")\r\n\r\nfig.show()" }, { "alpha_fraction": 0.6008583903312683, "alphanum_fraction": 0.6051502227783203, "avg_line_length": 22.10344886779785, "blob_id": "5cec2fae3d6fecc3bbfc876407a4ec155ba6aad6", "content_id": "90ff60048133f9608e075d59f9839d1af25a50a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 699, "license_type": "no_license", "max_line_length": 77, "num_lines": 29, "path": "/main2a.py", "repo_name": "AshutoshSwamy/Correlation", "src_encoding": "UTF-8", "text": "import csv\r\nimport numpy as np\r\n\r\n\r\ndef getDataSource(dataPath):\r\n marks = []\r\n daysPresent = []\r\n with open(dataPath) as csv_file:\r\n df = csv.DictReader(csv_file)\r\n for row in df:\r\n marks.append(float(row[\"Marks In Percentage\"]))\r\n daysPresent.append(int(row[\"Days Present\"]))\r\n return {\r\n \"x\": 
marks, \"y\": daysPresent\r\n }\r\n\r\n\r\ndef findCorrelation(dataSource):\r\n correlation = np.corrcoef(dataSource[\"x\"], dataSource[\"y\"])\r\n print(\"Correlation bertween Marks & Days Present :\\t\", correlation[0, 1])\r\n\r\n\r\ndef main():\r\n dataPath = \"./data2.csv\"\r\n dataSource = getDataSource(dataPath)\r\n findCorrelation(dataSource)\r\n\r\n\r\nmain()\r\n" } ]
4
WesStutzman/Rsync
https://github.com/WesStutzman/Rsync
92faaef6d84bd1ba9a5b30e57933e746e9021f03
1c4aa8854d79578c02a457e09f3b3b27b979a31b
671d09ce1ba4cbb70b8de165790b8d7db37b0e8e
refs/heads/master
2021-01-20T09:51:52.417002
2017-05-04T18:16:58
2017-05-04T18:16:58
90,293,923
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.7407407164573669, "avg_line_length": 12.5, "blob_id": "6da1230bd0714350e29d0d802879144eba005b0f", "content_id": "5aa0d67a03300d85b8c8d484140cf260d6c969e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 27, "license_type": "no_license", "max_line_length": 18, "num_lines": 2, "path": "/README.md", "repo_name": "WesStutzman/Rsync", "src_encoding": "UTF-8", "text": "# Rsync\nRsync in Python3.5\n" }, { "alpha_fraction": 0.6829670071601868, "alphanum_fraction": 0.6843406558036804, "avg_line_length": 31.79279327392578, "blob_id": "0ded7f4711b4417f50906f308cfd174c1587496e", "content_id": "9c34c1111bfa4b7e9bcc796b353623d848e90c0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3640, "license_type": "no_license", "max_line_length": 81, "num_lines": 111, "path": "/rsync.py", "repo_name": "WesStutzman/Rsync", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3.5\n# Wesley Stutzman\n# Rsync\n\n# Designed to clone a client file to a server\n# Only change files that have been edited since last update\n\nimport os\nimport sys\nimport shutil\n\n# Return true if a path is file or directory\ndef check_valid_path(server_name):\n return os.path.isfile(server_name) or os.path.isdir(server_name)\n\nclass rsync:\n\n # Input in arguments from start\n def __init__(self, client_storage=None, server_storage=None):\n self.client_storage = client_storage\n self.server_storage = server_storage\n changes = []\n \n # Check that files are valid and execute updates\n def update_changes(self):\n assert self.client_storage is not None\n assert self.server_storage is not None\n return self.update_changes_helper(\"\")\n\n # Use recursion to check for update files\n def update_changes_helper(self, extension):\n # Create working paths for both root files with new extentions\n client_directory = os.path.join(self.client_storage, extension)\n server_directory = os.path.join(self.server_storage, extension)\n\n # Get the content of the current directory\n directory_content = sorted(os.listdir(client_directory))\n\n # Go through all items inside the directory\n for files in directory_content:\n client_new_path = os.path.join(client_directory, files)\n server_new_path = os.path.join(server_directory, files)\n\n # Recursivly climb through the directories\n if os.path.isdir(client_new_path):\n if not os.path.exists(server_new_path):\n os.makedirs(server_new_path)\n print(\"Creating Directory: \" + server_new_path)\n self.update_changes_helper(os.path.join(extension, files))\n \n # Check if the file needs updating\n update_file = False\n if not os.path.exists(server_new_path):\n update_file = True\n elif os.path.getmtime(client_new_path) > os.path.getmtime(server_new_path):\n update_file = True\n \n # Update the file if needed\n if update_file:\n if os.path.isfile(client_new_path):\n shutil.copy(client_new_path, server_new_path)\n print(\"Updating File: \" + server_new_path)\n\n # Start updating paths for the user\n def update_paths(self, client_storage=None, server_storage=None):\n if client_storage is not None:\n self.set_client_storage(client_storage)\n if server_storage is not None:\n self.set_server_storage(server_storage)\n\n self.update_changes()\n # assert self.client_storage or self.server_storage\n # changes = self.find_changes(self.client_storage, self.server_storage)\n \n # Set client server location name\n def set_client_storage(self, 
client_name):\n\n # Hold a return value for later\n return_value = None\n\n # Check for valid input\n if check_valid_path(client_name):\n self.client_storage = client_name\n return_value = client_name\n else:\n print(\"ERROR INVALID CLIENT NAME FOR STORAGE\")\n return return_value\n\n # Set server storage location name\n def set_server_storage(self, server_name):\n \n # Hold a return value for later\n return_value = None\n\n # Check for valie input\n if check_valid_path(server_name):\n self.server_storage = server_name\n return_value = server_name\n else:\n print(\"ERROR INVALID CLIENT NAME FOR STORAGE\")\n return return_value\n\nif __name__ == \"__main__\":\n print(\"Starting\")\n test = rsync()\n if len(sys.argv) == 3:\n test.update_paths(sys.argv[1], sys.argv[2])\n else:\n client_directory = input(\"Enter client directory: \")\n server_directory = input(\"Enter server directory: \")\n test.update_paths(client_directory, server_directory)\n" } ]
2
bradyte/ece5831_hw3
https://github.com/bradyte/ece5831_hw3
bf34432f029681f519e32fe7e01e34db6f2c6153
97188e97ca4cb62c4f87bb5def02f177d7c5477c
64b76776e7b7bae98c2eeada66b7ec808d1cbf89
refs/heads/master
2021-08-28T07:10:05.635922
2017-12-11T14:40:41
2017-12-11T14:40:41
113,100,301
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5458643436431885, "alphanum_fraction": 0.5795766115188599, "avg_line_length": 31.48407554626465, "blob_id": "da961fa513497f590b0e05d236f9c5ef39f9ce6c", "content_id": "92ff38dd473b5d57e89d223d719cc45a38ee0c64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5102, "license_type": "no_license", "max_line_length": 116, "num_lines": 157, "path": "/hw3.py", "repo_name": "bradyte/ece5831_hw3", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 4 17:05:07 2017\n\n@author: tbrady\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.pylab as pylab\nfrom matplotlib import cm\nimport time\nimport os\nfrom hmmlearn import hmm\nfrom python_speech_features import mfcc, logfbank\nfrom scipy.io import wavfile\nimport warnings\n\nimport operator\n\nwarnings.filterwarnings(\"ignore\")\n\ntsys = time.time()\n###############################################################################\n'''\nGet the filepaths of the wav files\nhttps://github.com/yashiro32/speech_recognition/blob/master/speech_rec_w_essentia.py~\n'''\n###############################################################################\nfpaths = []\nlabels = []\nspoken = []\nsample_rate = 16000\n\nfor f in os.listdir('trainingSet'):\n if not f.startswith('.'):\n for w in os.listdir('trainingSet/' + f):\n if not w.startswith('.'):\n fpaths.append('trainingSet/' + f + '/' + w)\n labels.append(f)\n if f not in spoken:\n spoken.append(f)\n\n###############################################################################\n'''\nRead the data into the program\n\nFind the filterbanks\nhttp://www.practicalcryptography.com/miscellaneous/machine-learning/guide-mel-frequency-cepstral-coefficients-mfccs/\n\nFind the MFCCs\nhttp://www.practicalcryptography.com/miscellaneous/machine-learning/guide-mel-frequency-cepstral-coefficients-mfccs/\n\n'''\n############################################################################### \ndata = np.zeros((len(fpaths),sample_rate))\nfbank_feats = []\nmfcc_feats = []\nmaxsize = -1\nlengths = []\nfor n, file in enumerate(fpaths):\n sample_rate, d = wavfile.read(file)\n data[n, :d.shape[0]] = d\n if d.shape[0] > maxsize:\n maxsize = d.shape[0]\n fbank_feats.append(logfbank(d,samplerate=16000,winlen=0.025,winstep=0.01,\n nfilt=40,nfft=512,preemph=0.97))\n mfcc_feats.append(mfcc(d,samplerate=16000,winlen=0.025,winstep=0.01,numcep=13,\n nfilt=40,nfft=512,preemph=0.97,ceplifter=22,appendEnergy=True))\n lengths.append(len(mfcc_feats[n]))\ndata = data[:, :maxsize]\nidx = 0\nX = np.concatenate(mfcc_feats)\n\n###############################################################################\n'''\nRun the HMM\nhttp://practicalcryptography.com/miscellaneous/machine-learning/hidden-markov-model-hmm-tutorial/\n'''\n############################################################################### \nprint('Beginning HMM...')\nmodel = hmm.GaussianHMM(n_components=10, covariance_type='full').fit([X], lengths)\n\n#tfpaths = []\n#tlabels = []\n#tspoken = []\n#sample_rate = 16000\n#\n#for f in os.listdir('testingSet'):\n# if not f.startswith('.'):\n# for w in os.listdir('testingSet/' + f):\n# if not w.startswith('.'):\n# tfpaths.append('testingSet/' + f + '/' + w)\n# tlabels.append(f)\n# if f not in tspoken:\n# tspoken.append(f)\n#\n#tdata = np.zeros((len(tfpaths),sample_rate))\n#tfbank_feats = []\n#tmfcc_feats = []\n#maxsize = -1\n#for n, file in enumerate(tfpaths):\n# sample_rate, 
td = wavfile.read(file)\n# tdata[n, :td.shape[0]] = td\n# if td.shape[0] > maxsize:\n# maxsize = td.shape[0]\n# tfbank_feats.append(logfbank(td,samplerate=16000,winlen=0.025,winstep=0.01,\n# nfilt=40,nfft=512,preemph=0.97))\n# tmfcc_feats.append(mfcc(td,samplerate=16000,winlen=0.025,winstep=0.01,numcep=13,\n# nfilt=40,nfft=512,preemph=0.97,ceplifter=22,appendEnergy=True))\n# \n#tdata = tdata[:, :maxsize]\n\n\n\n#guessIndex = np.zeros(10)\n#correct = 0\n#ik = 90\n#for i in range(ik,ik+10):\n# guess = model.score_samples(tmfcc_feats[i])[1]\n# guessSum = np.sum(guess, axis=0)\n# index, value = max(enumerate(guessSum), key=operator.itemgetter(1))\n# guessIndex[index] = guessIndex[index] + 1\n#\n# \n## print('{}\\t{}'.format(tlabels[i],index))\n#index, value = max(enumerate(guessIndex), key=operator.itemgetter(1))\n#plt.figure(figsize=(2,2))\n#plt.bar(np.arange(10), np.array(guessIndex))\n#plt.xlabel('Class Guess = {}\\nCorrect Class = {}'.format(index, str(ik)[0]))\n#plt.title(tlabels[i])\n#\n#plt.figure(figsize=(10, 4))\n#wave = plt.plot(np.arange(0,1,1/16000), data[i,:], label = labels[idx], color='g')\n#plt.title('{} waveform from {}'.format(labels[idx], fpaths[idx]))\n#plt.xlabel('Time (s)')\n#plt.ylabel('Amplitude (32-bit Audio)')\n#plt.ylim([-2**15,2**15]) # 32-bit audio\n#plt.grid('on')\n#plt.legend(handles = wave)\n#\n#plt.figure(figsize=(10, 4))\n#pylab.imshow(fbank_feats[idx].T, origin='lower',aspect='auto', cmap='jet')\n#plt.title('{} filterbank from {}'.format(labels[idx], fpaths[idx]))\n#plt.ylabel('Filterbanks')\n#plt.xlabel('Time (10ms)')\n#\n#plt.figure(figsize=(10, 4))\n#pylab.imshow(mfcc_feats[idx].T, origin='lower',aspect='auto', cmap='jet')\n#plt.title('{} MFCCs from {}'.format(labels[idx], fpaths[idx]))\n#plt.ylabel('MFCCs')\n#plt.xlabel('Time (10ms)')\n\n\n\nprint('\\nExecution time: {}'.format(time.time() - tsys))\n\n\n" }, { "alpha_fraction": 0.565517246723175, "alphanum_fraction": 0.6241379380226135, "avg_line_length": 23.20833396911621, "blob_id": "a0c3f2858609f03084a17fe0c753e330e367659b", "content_id": "def0c744d377650b21fec900e3af267a5d130668", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 580, "license_type": "no_license", "max_line_length": 63, "num_lines": 24, "path": "/hw3_prettyPlots.py", "repo_name": "bradyte/ece5831_hw3", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 9 10:57:35 2017\n\n@author: tbrady\n\"\"\"\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef printTimeDomainWaveform(data, label, fpaths):\n time_units = np.arange(0,1,1/16000)\n \n plt.figure(figsize=(10, 4))\n \n wave = plt.plot(time_units, data, label = label, color='g')\n plt.title('{} waveform from {}'.format(label, fpaths))\n plt.xlabel('Time (s)')\n plt.ylabel('Amplitude (32-bit Audio)')\n plt.ylim([-2**15,2**15]) # 32-bit audio\n plt.grid('on')\n plt.legend(handles = wave)" } ]
2
bwallace/OpenMeta-analyst-
https://github.com/bwallace/OpenMeta-analyst-
0fbc19f77018a72ce293e1c72e9b2c0a7eb3b615
e3147cab25e773251e7052f3bf27852ea41d522e
07f2ed7b3565c8d6679da4084bbb39930221da20
refs/heads/master
2021-01-21T04:37:15.296676
2020-07-28T00:52:29
2020-07-28T00:52:29
293,390
26
13
null
2009-08-31T16:25:12
2020-07-16T17:51:52
2020-07-28T00:52:30
Python
[ { "alpha_fraction": 0.5822928547859192, "alphanum_fraction": 0.5872114896774292, "avg_line_length": 40.3125, "blob_id": "daa9532e1c8823dd21234557eb5120e97191ac75", "content_id": "47dde98f1307058320b83f68d219922df2bb3062", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2643, "license_type": "no_license", "max_line_length": 87, "num_lines": 64, "path": "/src/network_view.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "from PyQt4.Qt import *\nimport forms.ui_network_view\nimport edit_list_models\nimport meta_py_r\n\nPageSize = (500, 300)\n\nclass ViewDialog(QDialog, forms.ui_network_view.Ui_network_view_dialog):\n\n def __init__(self, model, parent=None):\n super(ViewDialog, self).__init__(parent)\n self.setupUi(self)\n \n self.model = model\n self.dataset = model.dataset\n self.cur_outcome = model.current_outcome\n self.cur_follow_up = model.get_current_follow_up_name()\n \n self.populate_cbo_boxes()\n self.setup_signals()\n \n self.x_coord = 5\n self.y_coord = 5\n self.scene = QGraphicsScene(self)\n self.scene.setSceneRect(0, 0, PageSize[0], PageSize[1])\n self.network_viewer.setScene(self.scene)\n self.graph_network(self.cur_outcome, self.cur_follow_up)\n \n def setup_signals(self):\n QObject.connect(self.outcome_cbo_box, SIGNAL(\"currentIndexChanged(QString)\"),\n self.outcome_changed)\n QObject.connect(self.follow_up_cbo_box, SIGNAL(\"currentIndexChanged(QString)\"),\n self.follow_up_changed)\n \n def outcome_changed(self, new_outcome):\n self.cur_outcome = str(new_outcome)\n self.graph_network(self.cur_outcome, self.cur_follow_up)\n \n def follow_up_changed(self, new_follow_up):\n self.cur_follow_up = str(new_follow_up)\n self.graph_network(self.cur_outcome, self.cur_follow_up)\n \n def populate_cbo_boxes(self):\n self.outcome_cbo_box.addItems(self.dataset.get_outcome_names())\n self.follow_up_cbo_box.addItems(self.dataset.get_follow_up_names())\n\n # set the current item to reflect the selected/active outcome\n # and follow-up\n cur_outcome_index = self.outcome_cbo_box.findText(self.cur_outcome)\n self.outcome_cbo_box.setCurrentIndex(cur_outcome_index)\n \n cur_follow_up_index = self.follow_up_cbo_box.findText(self.cur_follow_up)\n self.follow_up_cbo_box.setCurrentIndex(cur_follow_up_index)\n \n def graph_network(self, outcome, follow_up):\n data_type = self.model.get_outcome_type(outcome, get_str=False)\n \n img_path = meta_py_r.ma_dataset_to_simple_network(\n table_model=self.model,\n data_type=data_type,\n outcome=outcome,\n follow_up=follow_up)\n pixmap = QPixmap(img_path)\n self.scene.addPixmap(pixmap)" }, { "alpha_fraction": 0.6732891798019409, "alphanum_fraction": 0.6870861053466797, "avg_line_length": 35.2400016784668, "blob_id": "3b1798c53265eccde960f1c79ab0ca598f8c940d", "content_id": "159eaebd5abd233d382b7cdb6d4a7a99ecca2012", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1812, "license_type": "no_license", "max_line_length": 263, "num_lines": 50, "path": "/README.txt", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "+++++++++++++++++++++++++++++\n+ +\n+ OpenMetaAnalyst +\n+ +\n+++++++++++++++++++++++++++++\n\nTo run OpenMetaAnalyst from source, you'll need to install the corresponding dependencies. 
If you're interested in simply running the program (i.e., not from source) consider grabbing a binary build (for Windows and Mac OS X) from: \n\n http://www.cebm.brown.edu/openmeta\n\nOtherwise, you'll need to install the necessary R packages. \nFirst install the dependencies:\nFrom within a sudo-ed R session type:\n\n\t> install.packages(c(\"metafor\",\"lme4\",\"MCMCpack\",\"igraph\"))\n\n\n\t\nNext, you'll need to build and install the openmetar packages and altered HSROC (NOT THE ONE FROM CRAN) package and install them. These package are distributed with the source under the \"src/R\" directory. \n\n > R CMD build HSROC\n > R CMD build openmetar\n > sudo R CMD INSTALL HSROC_2.0.5.tar.gz\n > sudo R CMD INSTALL openmetar_1.0.tar.gz\n\nOn this branch, we have moved to R 3.1!\n\nOnce R is setup for OpenMeta, you'll need to install Python (we use 2.7) and the necessary libraries. You'll need PyQT (and QT: see http://www.riverbankcomputing.co.uk/software/pyqt/intro) installed -- we use PyQt 4.10; your mileage may vary with other versions. \n\nNext, install rpy2 (rpy.sourceforge.net/rpy2.html) in Python. Verify that all is well by executing:\n\n > import rpy2\n > from rpy2 import robjects \n\nAt the Python console.\n\nThat should be all you need. Once everything is setup, you can launch the program by typing:\n\n > python src/launch.py\n\nAt the console. This should fire up the GUI.\n\nFor running nosetests:\nthese should be run from within the src folder:\nnosetests -v test_meta_analysis.py\n\n(important)? dependency versions:\nR : 3.X\nmetafor: 1.6.0ish\npyqt4 : 4.10ish\n" }, { "alpha_fraction": 0.6193056106567383, "alphanum_fraction": 0.6256586909294128, "avg_line_length": 36.15553665161133, "blob_id": "26089b8cc16f9eca93003af9a48d04427fcb9296", "content_id": "5ff9c88def0997a34315ab18bbd29f434cfdfe0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 40610, "license_type": "no_license", "max_line_length": 187, "num_lines": 1093, "path": "/src/R/openmetar/R/meta_reg.r", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "##################################################################\n# #\n# Byron C. Wallace #\n# George E. 
Dietz\n# Brown CEBM\n# Tufts Medical Center #\n# OpenMeta[analyst] #\n# --- #\n# meta_reg.r #\n##################################################################\n\nlibrary(metafor)\n\n#gfactor <- function(x, ref.value=NULL) {\n#\t### Transforms x in to a factor, with ref.value being the first level ###\n#\t\n#\t# Just set reference value to be the first value if ref.value not specified\n#\tif (is.null(ref.value)) {\n#\t\tref.value <- x[1]\n#\t}\n#\t\n#\t# sort levels, sticking ref.value at the front\n#\tlevels <- unique(x)\n#\tlevels.without.ref.value <- levels[levels!=ref.value]\n#\tlevels <- c(ref.value, sort(levels.without.ref.value))\n#\t\n#\tfactor(x, levels=levels)\n#}\n\n\nregression.wrapper <- function(data, mods.str, method, level, digits, btt=NULL) {\n\t# Construct call to rma\n\tif (!is.null(btt)) {\n\t\tbtt.str <- paste(\"c(\",paste(btt,collapse=\", \"),\")\", sep=\"\")\n\t\tcall_str <- sprintf(\"rma.uni(yi,vi, mods=%s, data=data, method=\\\"%s\\\", level=%f, digits=%d, btt=%s)\", mods.str, method, level, digits, btt.str)\n\t} else {\n\t\tcall_str <- sprintf(\"rma.uni(yi,vi, mods=%s, data=data, method=\\\"%s\\\", level=%f, digits=%d)\", mods.str, method, level, digits)\n\t\t\t\t\n\t}\n\n\texpr<-parse(text=call_str) # convert to expression\n\tres <- eval(expr) # evaluate expression\n\tres\n}\n\nmake.mods.str <-function(mods) {\n\t# Builds the mods string as specified by the information in mods\n\t# The order will be numeric, categorical, then interaction moderators\n\t# factors in data already assumed to be factors with ref.value set as first\n\t# level\n\t\n\tstr.els <- c() # mods string elements\n\t\n\t# numeric \n\tfor (mod in mods[[\"numeric\"]]) {\n\t\tstr.els <- c(str.els, mod)\n\t}\n\t\n\t# categorical\n\tfor (mod in mods[[\"categorical\"]]) {\n\t\tstr.els <- c(str.els, mod)\n\t}\n\t\n\t# interactions\n\tfor (interaction in names(mods[['interactions']])) {\n\t\tstr.els <- c(str.els, interaction)\n\t}\n\t\t\n\n\t\n\t# fix for issue #122 of OpenMEE\n\tif (length(str.els)!=0) {\n\t\t# normal case\n\t\tmods.str <- paste(\"~\", paste(str.els,collapse=\" + \"), sep=\" \")\n\t} else { # no string elements\n\t\tmods.str <- \"~ 1\"\n\t}\n\t\n\t#cat(mods.str,\"\\n\")\n\tmods.str\n}\n\nmake.design.matrix <- function(strat.cov, mods, cond.means.data, data) {\n\t# Make design matrix for conditional means\n\t# strat.cov is the name of the covariate in data to stratify over\n\t# This code is very sensitive to the fact that when there is an interaction\n\t# of the form A:B, the coefficients are given such that the A coefficients iterate\n\t# before the B coefficients\n\t\n\tnlevels <- length(levels(data[[strat.cov]])) # num of levels in strat.cov\n\trownames <- levels(data[[strat.cov]])\n\tcolnames <- c(\"Intercept\")\n\tdsn.matrix <- matrix(rep(1,nlevels)) # intercept column\n\t\n\t# 1 column for each numeric moderator\n\tfor (mod in mods[[\"numeric\"]]) {\n\t\tvalue <- cond.means.data[[mod]]\n\t\tdsn.matrix <- cbind(dsn.matrix,rep(value,nlevels))\n\t\tcolnames<-c(colnames, mod)\n\t}\n\t\n\t### NOTE: In following code mod.matrix is the part of the matrix to be stuck\n\t### on to dsn.matrix corresponding to a categorical moderator or\n\t### an interaction\n\t# qi-1 columns for each categorical modertor where q is the # of levels of\n\t# the moderator\n\tfor (mod in mods[[\"categorical\"]]) {\n\t\tl.mod <- levels(data[[mod]]) # levels of the moderator\n\t\tmod.matrix <- c()\n\t\t\n\t\tif (mod==strat.cov) {\n\t\t\t# iterate over the levels of the moderator\n\t\t\tfor (x in l.mod) 
{\n\t\t\t\tx.coded <- coded.cat.mod.level(x, l.mod)\n\t\t\t\tmod.matrix <- rbind(mod.matrix,x.coded)\n\t\t\t}\n\t\t} else {\n\t\t\t# just replicate the coding of the desired level\n\t\t\tvalue <- cond.means.data[[mod]]\n\t\t\tlvl.coded <- coded.cat.mod.level(value, l.mod)\n\t\t\tfor (x in 1:nlevels) {\n\t\t\t\tmod.matrix <- rbind(mod.matrix, lvl.coded)\n\t\t\t}\n\t\t} # end of else\n\n\t\tdsn.matrix <- cbind(dsn.matrix, mod.matrix)\n\t\tcolnames<-c(colnames, paste(mod, l.mod[2:length(l.mod)],sep=\"\"))\n\t} # end for categorical\n\t\n\t\n\t# interactions\n\tinteraction.mod.matrix <- c()\n\tfor (interaction in names(mods[['interactions']])) {\n\t\tinteraction.vars <- mods[['interactions']][[interaction]]\n\t\t# What type of interaction? CAT:CAT, CAT:CONT, or CONT:CONT?\n\t\t\n\t\tcat.cat <- (interaction.vars[1] %in% mods[['categorical']]) && (interaction.vars[2] %in% mods[['categorical']])\n\t\t# same thing\n\t\tcat.cont <- (interaction.vars[1] %in% mods[['categorical']]) && (interaction.vars[2] %in% mods[['numeric']])\n\t\tcont.cat <- (interaction.vars[1] %in% mods[['numeric']]) && (interaction.vars[2] %in% mods[['categorical']])\n\t\tcat.cont <- cat.cont || cont.cat\n\t\t\n\t\tif (cat.cat) {\n\t\t\t# two categorical variables Note: (p-1)*(q-1) columns where p and q\n\t\t # are the # of levels in the first categorical var and the 2nd\n\t\t # respectively\n\t\t\t\n\t\t\tcat1.levels <- levels(data[[interaction.vars[1]]])\n\t\t\tcat2.levels <- levels(data[[interaction.vars[2]]])\n\t\t\t\n\t\t\tif (strat.cov %in% interaction.vars) {\n\t\t\t\t# One of the variables in the interaction is the stratification variable\t\t\n\t\t\t\tstrat.cov.is.first <- strat.cov == interaction.vars[1]\n\t\t\t\tif (strat.cov.is.first) {\n\t\t\t\t\t# iterate over levels of first cov, keeping 2nd cov level constant\n\t\t\t\t\tvalue2 <- cond.means.data[[interaction.vars[2]]]\n\t\t\t\t\tmod.matrix <- c()\n\t\t\t\t\tfor (value1 in cat1.levels) {\n\t\t\t\t\t\trow.vector <- get.row.vector.cat.cat(\n\t\t\t\t\t\t\t\tcat1.levels, cat2.levels,\n\t\t\t\t\t\t\t\tvalue1, value2)\n\t\t\t\t\t\tmod.matrix <- rbind(mod.matrix,row.vector)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\t# strat.cov is 2nd\n\t\t\t\t\t# Iterate over levels of 2nd cov, keeping the 1st cov level\n\t\t\t\t # constant.\n\t\t\t\t\tvalue1 = cond.means.data[[interaction.vars[1]]]\n\t\t\t\t\tmod.matrix <- c()\n\t\t\t\t\tfor (value2 in cat2.levels) {\n\t\t\t\t\t\trow.vector <- get.row.vector.cat.cat(\n\t\t\t\t\t\t\t\tcat1.levels, cat2.levels,\n\t\t\t\t\t\t\t\tvalue1, value2)\n\t\t\t\t\t\tmod.matrix <- rbind(mod.matrix,row.vector)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t# Neither of the variables in the interaction is the stratification variable\n\t\t\t\tvalue1 = cond.means.data[[interaction.vars[1]]]\n\t\t\t\tvalue2 = cond.means.data[[interaction.vars[2]]]\n\t\t\t\t\n\t\t\t\trow.vector <- get.row.vector.cat.cat(\n\t\t\t\t\t\tcat1.levels, cat2.levels,\n\t\t\t\t\t\tvalue1, value2)\n\t\t\t\t\n\t\t\t\tmod.matrix <- c()\n\t\t\t\tfor (i in 1:nlevels) {\n\t\t\t\t\tmod.matrix <- rbind(mod.matrix,row.vector)\n\t\t\t\t}\n\t\t\t} # end else strat.cov cat:cat\n\t\t\t\n\t\t\t### Generate column labels\n\t\t\t# names of interaction vars\n\t\t\tintvar1 <- interaction.vars[1]\n\t\t\tintvar2 <- interaction.vars[2]\n\t\t\tcol.names.for.interaction <- c()\n\t\t\tfor (y in cat2.levels[2:length(cat2.levels)]) {\n\t\t\t\tfor (x in cat1.levels[2:length(cat1.levels)]) {\n\t\t\t\t\tcol.names.for.interaction <- c(col.names.for.interaction, paste(intvar1, x, 
\":\",intvar2,y,sep=\"\"))\n\t\t\t\t}\n\t\t\t}\n\t\t\tcolnames<-c(colnames, col.names.for.interaction)\n\t\t} else if (cat.cont) {\n\t\t\t# one categorical, one continuous # (p-1) columns\n\t\t\tif (strat.cov %in% interaction.vars) {\n\t\t\t\t# One of the variables in the interaction is the stratification variable\n\t\t\t\tif (strat.cov==interaction.vars[1]) {\n\t\t\t\t\tstrat.levels <- levels(data[[interaction.vars[1]]])\n\t\t\t\t\tcont.val <- cond.means.data[[interaction.vars[2]]]\n\t\t\t\t} else {\n\t\t\t\t\tstrat.levels <- levels(data[[interaction.vars[2]]])\n\t\t\t\t\tcont.val <- cond.means.data[[interaction.vars[1]]]\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tmod.matrix <- c()\n\t\t\t\tfor (x in strat.levels) {\n\t\t\t\t\trow.vector <- get.row.vector.cat.cont(strat.levels, x, cont.val)\n\t\t\t\t\tmod.matrix <- rbind(mod.matrix,row.vector)\n\t\t\t\t}\n\t\t\t\t\n\t\t } else {\n \t# Neither of the variables in the interaction is the stratification variable\n value1 <- cond.means.data[[interaction.vars[1]]]\n value2 <- cond.means.data[[interaction.vars[2]]]\n\t\t\t\t# Which of the interactions is the numeric, which the categorical?\n\t\t\t\tif (class(value1) == \"numeric\") {\n\t\t\t\t\tcont.val <- value1\n\t\t\t\t\tcat.val <- value2\n\t\t\t\t\tcat.levels <- levels(data[[interaction.vars[2]]])\n\t\t\t\t} else {\n\t\t\t\t\tcont.val <- value2\n\t\t\t\t\tcat.val <- value1\n\t\t\t\t\tcat.levels <- levels(data[[interaction.vars[1]]])\n\t\t\t\t}\n\t\t\t\n\t\t\t\trow.vector <- get.row.vector.cat.cont(cat.levels, cat.val, cont.val)\n\t\t\t\tmod.matrix <- c()\n\t\t\t\tfor (i in 1:nlevels) {\n\t\t\t\t\tmod.matrix <- rbind(mod.matrix,row.vector)\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\t### Make column labels\n\t\t\tintVar1 <- interaction.vars[1]\n\t\t\tintVar2 <- interaction.vars[2]\n\t\t\tif (intVar1 %in% mods[[\"numeric\"]]) {\n\t\t\t\tcont.var <- intVar1\n\t\t\t\tcat.var <- intVar2\n\t\t\t} else {\n\t\t\t\tcont.var <- intVar2\n\t\t\t\tcat.var <- intVar1\n\t\t\t}\n\t\t\tcat.levels <- levels(data[[cat.var]])\n\t\t\t# Continuous label comes first, followed by cat levels\n colnames <- c(colnames, paste(cont.var,\":\",cat.levels[2:length(cat.levels)],sep=\"\"))\n\t\t\t### END of make column labels\n\t\t} else {\n\t\t\t# two continuous variables # 1 column\n\t value1 <- cond.means.data[[interaction.vars[1]]]\n\t value2 <- cond.means.data[[interaction.vars[2]]]\n\t\t\tmod.matrix <- rep(value1*value2, nlevels)\n\t\t\t\n\t\t\tcolnames <- c(colnames, paste(interaction.vars[1],\":\",interaction.vars[2], sep=\"\"))\n\t\t}\n\t\tinteraction.mod.matrix <- cbind(interaction.mod.matrix,mod.matrix)\n\t\t\n\t} # end for interactions\n\tdsn.matrix <- cbind(dsn.matrix, interaction.mod.matrix)\n\t# Set helpful dimnames\n\tdimnames(dsn.matrix) <- list(rownames, colnames)\n\treturn(dsn.matrix)\n}\n\n#mod.matrix.for.strat.cov.in.cat.cat <- function()\nget.row.vector.cat.cat <- function(cat1.levels, cat2.levels, value1, value2) {\t\n\t# Returns a row vector for part of the mod.matrix for a cat:cat interaction\n\t# given the levels of the categories and the values the categories take\n\trow.vector <- c()\n\t# We need to generate a vector, then replicate it \n\t# iterate over column values\n\t# Note: we ignore the first level in each category since it is naturally\n\t# coded\n\tfor (y in cat2.levels[2:length(cat2.levels)]) {\n\t\t# rma varies 1st covariate faster than 2nd\n\t\tfor (x in cat1.levels[2:length(cat1.levels)]) {\n\t\t\trow.vector <- c(row.vector, ifelse(y==value2 && x==value1, 
1,0))\n\t\t}\n\t}\n\t\n\treturn(row.vector)\n}\n\nget.row.vector.cat.cont <- function(cat.levels, cat.val, cont.val) {\n\t# cat.levels: levels of categorical covariate\n\t# cat.val: chosen value of categorical variable (a level)\n\t# cont.val: chosen value of continuous variable\n\t\n\trow <- c()\n\t# ignore first level since it is naturally coded\n\tfor (x in cat.levels[2:length(cat.levels)]) {\n\t\trow <- c(row, ifelse(x==cat.val, 1, 0))\n\t}\n\trow <- cont.val * row\n\treturn(row)\n}\n\n\ncoded.cat.mod.level <- function(lvl, l.mod) {\n\t# gives a coded representation of the moderator according to the order\n\t# of levels in l.mod\n\t# l.mod: levels in the moderator\n\t# lvl chosen lvl to get the coding for\n\t#\n\t# E.g. if levels(moderator) == c(\"USA\",\"CANADA\",\"CHINA\")\n\t# lvl\n\t# \"USA\" --> c(0,0)\n\t# \"CANADA\" --> c(1,0)\n\t# \"CHINA\" -- > c(0,1)\n\t\n\t# Find index of lvl in l.mod\n\tindex <- match(lvl, l.mod)\n\tn.levels <- length(l.mod)\n\t\n\t# Make coding matrix e.g.\n\t# USA 0 0 # zero vector\n\t# CANADA 1 0 # identity matrix \n\t# CHINA 0 1 # \n\tcode.matrix <- rbind(rep(0,n.levels-1),diag(n.levels-1))\n\tcode.matrix[index,]\n}\n\n\n\ng.meta.regression <- function(\n data,\n mods,\n method,\n level,\n digits,\n measure,\n btt=NULL,\n make.coeff.forest.plot=FALSE,\n exclude.intercept=FALSE, # For coefficient forest plot\n disable.plots = FALSE)\n{\n\t# This is s a thin wrapper to metafor's meta regression functionality\n\t# in order to let R do the dummy coding for us\n\t#\n\t# mods: list(numeric=c(...numeric moderators...),\n\t# categorical=c(...categorical moderators...),\n\t# interactions=list(\"A:B\"=c(\"A\",\"B\"),\"B:C\"=c(\"B\",C\"),...)\n\t# )\n\t# Note that the interaction names should be as they appear in the mods\n\t# string formula\n\t# data: should be a dataframe of the type that metafor likes ie\n\t# yi and vi for the effect and variance columns\n\t# slab holds study names\n\t# the parts that are 'factors' have already been made in to factors with\n\t# the appropriate reference values\n\t\n\t# mods.str: string to be passed to metafor to implement the moderators\n\t# e.g. 
~ gfactor(alloc)+ablat+gfactor(country)+gfactor(alloc):gfactor(country)\n\tmods.str <- make.mods.str(mods)\n\t\n\t# obtain regression result rma.uni\n\tres <- regression.wrapper(data, mods.str, method, level, digits,btt)\n\t\n\t# Add residuals to additional values output\n\tresiduals <- rstandard(res, digits=digits) # is a dataframe\n\tresiduals$slab <- data$slab\n\tres.and.residuals <- res\n\tres.and.residuals$residuals <- residuals\n\tres.and.residuals.info <- c(rma.uni.value.info(),\n\t\t\t list(residuals=list(type=\"blob\", description=\"Standardized residuals for fitted models\")))\n\t\n\tSummary <- paste(capture.output(res), collapse=\"\\n\") # convert print output to a string\n\t# add regression model formula to output\n\tregression.model.formula.str <- sprintf(\"Regression model formula: yi %s\", mods.str)\n\tSummary <- paste(Summary, regression.model.formula.str, sep=\"\\n\\n\")\n\t# add regresison model equation to output\n\test.coeffs <- round(res$b[,1], digits=digits)\n\ttmp <- est.coeffs[2:length(est.coeffs)] # w/o intercept\n\ttmp <- paste(tmp, names(tmp), sep=\"*\")\n\ttmp <- paste(tmp, collapse=\" + \")\n\treg.equation <- paste(est.coeffs[1],tmp, sep=\" + \")\n\treg.equation.str <- sprintf(\"Regression model equation: %s\", reg.equation)\n\tSummary <- paste(Summary, reg.equation.str, sep=\"\\n\")\n\t\n\t# add more output by Marc\n\tmodel.formula.str <- paste(\"yi\", mods.str)\n\tmodel.formula <- eval(model.formula.str)\n\tmore.output <- reg.output.helper(theData=data, rma.results=res, model.formula=model.formula, digits=digits)\n\tpre.summary <- \"\"\n\tfor (name in names(more.output)) {\n\t\tdashes <- paste(rep(\"-\", nchar(name)+2), collapse=\"\")\n\t\titem.str <- sprintf(\"%s:\\n%s\\n%s\", name, dashes, more.output[[name]])\n\t\tpre.summary <- paste(pre.summary, item.str, sep=\"\\n\\n\")\n\t}\n\tSummary <- paste(pre.summary, Summary, sep=\"\\n\\n\")\n\t\n\tresults <- list(#\"images\"=images,\n\t\t\t\"Summary\"=Summary,\n\t\t\t#\"plot_names\"=plot.names,\n\t\t\t#\"plot_params_paths\"=plot.params.paths,\n\t\t\t\"res\"=res.and.residuals, #res,\n\t\t\t\"res.info\"=res.and.residuals.info)# rma.uni.value.info())\n\t\n\t########################################################################\n\n\timages <- c()\n\tplot.names <- c()\n\tplot.params.paths <- c()\n\t# 1 continuous covariate, no categorical covariates\n\tif (is.single.numeric.covariate(mods) && !disable.plots) {\n\t\t# if only 1 continuous covariate, create reg. 
plot\n\t\tbetas <- res$b\n\t\tfitted.line <- list(intercept=betas[1], slope=betas[2])\n\t\tplot.path <- \"./r_tmp/reg.png\"\n\t cov.name <- mods[['numeric']][[1]]\n\t\tcov.vals <- data[[cov.name]]\n\t\tplot.data <- g.create.plot.data.reg(data, cov.name, cov.vals, measure, level, fitted.line)\n\t\t\n\t\t# @TODO x and y labels ought to be passed in, probably\n\t\t\n\t\tplot.data$xlabel <- cov.name\n\t\t\n\t\tscale.str <- g.get.scale(measure)\n\t\tif ((scale.str==\"standard\") || (scale.str==\"arcsine\")) {\n\t\t\tscale.str <- \"\"\n\t\t\t# This is for the y-axis label on regression plot - don't add \"standard\" or \"arcsine\" to label.\n\t\t}\n\t\tplot.data$ylabel <- paste(scale.str, \" \", pretty.metric.name(as.character(measure)), sep=\"\")\n\t\tmeta.regression.plot(plot.data, plot.path)\n\t\t\n\t\t# write the plot data to disk so we can save it\n\t\t# @TODO will want to write the params data, too,\n\t\t# eventually\n\t\tplot.data.path <- save.plot.data(plot.data)\n\t\t\n\t\timages <- c(\"Regression Plot\"=plot.path)\n\t\tplot.names <- c(\"reg.plot\"=\"reg.plot\")\n\t\treg.plot.params.path <- save.plot.data(plot.data)\n\t\tplot.params.paths <- c(\"Regression Plot\"=plot.data.path)\n\t\t\n\t\t# add regression plot to results\n\t\tresults[['images']] <- images\n\t\tresults[['plot_names']] <- plot.names\n\t\tresults[['plot_params_paths']] <- plot.params.paths\n\t\t########################################################################\n\t}\n\t\n\tcoeff.forest.plot.path <- paste(\"r_tmp/\", \"bforestplot_\", as.character(as.numeric(Sys.time())), sep = \"\")\n\t\n\tif (make.coeff.forest.plot && !disable.plots) {\n\t\tforest.plot.of.regression.coefficients(as.vector(res$b), res$ci.lb, res$ci.ub, labels=rownames(res$b), exclude.intercept=exclude.intercept, filepath=coeff.forest.plot.path)\n\t\timages <- c(images, \"Forest Plot of Coefficients\"=paste(coeff.forest.plot.path,\".png\",sep=\"\"))\n\t\tplot.names <- c(plot.names, \"coeff.forest.plot\"=\"coeff.forest.plot\")\n\t\tplot.params.paths <- c(\"Forest Plot of Coefficients\"=coeff.forest.plot.path)\n\t}\n\t\n\t# add regression plot to results\n\tif (length(images)>0)\n\t\tresults[['images']] <- images\n\tif (length(plot.names)>0)\n\t\tresults[['plot_names']] <- plot.names\n\tif (length(plot.params.paths)>0)\n\t\tresults[['plot_params_paths']] <- plot.params.paths\n\t\n\tresults\n}\n\nis.single.numeric.covariate <- function(mods) {\n\t# Does mods only describe a single numeric covariate?\n\tcount.numeric <- length(mods[['numeric']])\n\tcount.categorical <- length(mods[['categorical']])\n\tcount.interactions <- length(mods[['interactions']])\n\t\n\tif (count.numeric==1 && count.categorical + count.interactions == 0) {\n\t\treturn(TRUE)\n\t} else {\n\t\treturn(FALSE)\n\t}\n}\n\n# create regression plot data for g.meta.regression function\ng.create.plot.data.reg <- function(reg.data, cov.name, cov.vals, measure, level, fitted.line) {\n\tscale.str <- g.get.scale(measure)\n\tplot.data <- list(\"fitted.line\" = fitted.line,\n\t\t\ttypes = c(rep(0, length(reg.data$slab))),\n\t\t\tscale = scale.str,\n\t\t\tcovariate = list(varname = cov.name, values = cov.vals))\n\talpha <- 1.0-(level/100.0)\n\tmult <- abs(qnorm(alpha/2.0))\n\t\n\t\n\ty <- reg.data$yi\n\tse <- sqrt(reg.data$vi)\n\teffects <- list(ES = y,\n\t\t\tse = se)\n\tplot.data$effects <- effects\n\t\n\t###\n\t# @TODO; these need to be set by the user,\n\t# will probably be placed on the params object\n\tplot.data$sym.size <- 1\n\tplot.data$lcol <- \"darkred\"\n\tplot.data$lweight <- 
3\n\tplot.data$lpattern <- \"dotted\"\n\tplot.data$plotregion <- \"n\"\n\tplot.data$mcolor <- \"darkgreen\"\n\tplot.data$regline <- TRUE\n\t\n\tplot.data\n}\n\n# get scale for g.meta.regression function and derivatives\ng.get.scale <- function (measure) \n{\n\tif (metric.is.log.scale(measure)) {\n\t\tscale <- \"log\"\n\t}\n\telse if (metric.is.logit.scale(measure)) {\n\t\tscale <- \"logit\"\n\t}\n\telse if (metric.is.arcsine.scale(measure)) {\n\t\tscale <- \"arcsine\"\n\t}\n\telse {\n\t\tscale <- \"standard\"\n\t}\n\tscale\n}\n\ng.meta.regression.cond.means <- function(data, mods, method, level, digits, strat.cov, cond.means.data, btt=NULL) {\n\t# Same as g.meta.regression. except we have conditional means output\n\t# strat_cov: the categorical covariate (name) to stratify the results of the conditional means over\n\t# cond.means.data: The values for the other covariates given as a list:\n\t# List(cov1_name=cov1_val, cov2_cat_name=cov2_level,...)\n\t\n\tmods.str <- make.mods.str(mods)\n\t\n\t# obtain regression result rma.uni\n\tres <- regression.wrapper(data, mods.str, method, level, digits,btt)\n\t\n\t### Generate conditional means output\n\tA <- make.design.matrix(strat.cov, mods, cond.means.data, data)\n\tcat(\"Design Matrix:\\n\", A)\n\tnew_betas <- A %*% res$b\n\tnew_cov <- A %*% res$vb %*% t(A)\n\tnew_vars <- diag(new_cov)\n\talpha <- 1.0-(level/100.0)\n\tmult <- abs(qnorm(alpha/2.0))\n\tnew_lowers <- new_betas - mult*sqrt(new_vars)\n\tnew_uppers <- new_betas + mult*sqrt(new_vars)\n\tnew_se <- sqrt(new_vars)\n\t\n\tcond.means.df <- data.frame(cond.mean=new_betas, se=new_se, var=new_vars, ci.lb=new_lowers, ci.ub=new_uppers)\n\t\n\t# Construct pretty output\n\tcond.means.df.rounded <- round(cond.means.df, digits=digits)\n\tcond.means.df.str <- paste(capture.output(cond.means.df.rounded), collapse=\"\\n\")\n\tcond.means.data.names <- sort(names(cond.means.data))\n\tcond.means.data.vals <- sapply(cond.means.data.names, function(x) cond.means.data[[x]])\n\tlines = paste(cond.means.data.names, cond.means.data.vals, sep=\": \")\n\tother.vals.str <- paste(lines, sep=\"\\n\")\n\tcond.means.summary <- paste(\"The conditional means are calculated over the levels of: \", strat.cov,\n\t\t\t\"\\nThe other covariates had selected values of:\\n\",\n\t\t\tother.vals.str,\"\\n\",cond.means.df.str,sep=\"\")\n\t\n\t### END of conditional means output generation\n\t\n\tresults<-list(\n\t\t\t \"Summary\"=paste(capture.output(res), collapse=\"\\n\"),\n\t\t\t\t \"res\"=res,\n\t\t\t\t \"res.info\"=rma.uni.value.info(),\n\t\t\t\t \"Conditional Means Summary\"=cond.means.summary,\n\t\t\t\t \"res.cond.means\"=cond.means.df\n\t\t\t\t)\n}\n\ng.bootstrap.meta.regression <- function(data, mods, method, level, digits,\n\t\tn.replicates, histogram.title=\"\", bootstrap.plot.path=\"./r_tmp/bootstrap.png\",\n\t\tbtt=NULL) {\n\t# Bootstrapped meta-regression\n\t# A subset is valid if, for each categorical variable, all the levels are\n\t# preset\n\t\n\tmods.str <- make.mods.str(mods)\n\t\n\t### obtain overall regression result rma.uni\n\t##res <- regression.wrapper(data, mods.str, method, level, digits,btt=NULL)\n\t\n\t###### Bootstrap\n\tmax.failures <- 5*n.replicates # # failures generating test statistic before we give up\n\t# Count number of levels for each categorical covariate\n\tcat.mods.level.counts <- list()\n\tfor (mod in mods[[\"categorical\"]]) {\n\t\tn.levels <- length(levels(data[[mod]]))\n\t\tcat.mods.level.counts[[mod]] <- n.levels\n\t}\n\t\n\t# Statistic passed to boot\n\tmeta.reg.statistic <- 
function(data, indices) {\n\t\tok = FALSE\n\t\tcat(\"failures: \",failures)\n\t\twhile (!ok) {\n\t\t\tif (failures > max.failures) {\n\t\t\t stop(\"Number of failed attempts exceeded 5x the number of replicates\")\n\t\t\t}\n\t\t\tif (!subset.ok(data,indices)) {\n\t\t\t\t# Subset chosen was not ok\n\t\t\t\tfailures <<- failures+1\n\t\t\t\tindices <- sample.int(nrow(data), size=length(indices), replace=TRUE)\n\t\t\t\tcat(\"subset not ok\\n\")\n\t\t\t\tnext\n\t\t\t}\n\t\t\t\n\t\t\tres.tmp <- tryCatch({\n\t\t\t\t\t\tregression.wrapper(data[indices,], mods.str, method, level, digits,btt)\n\t\t\t\t\t }, error = function(e) {\n\t\t\t\t\t\tfailures <<- failures + 1\n\t\t\t\t\t\tindices <- sample.int(nrow(data), size=length(indices), replace=TRUE)\n\t\t\t\t\t\tcat(\"Error in regression wrapper: \",e$message,\"\\n\")\n\t\t\t\t\t\tnext\n\t\t\t\t\t })\n\t\t\t# Everything worked alright\n\t\t\tok <- TRUE\n\t\t} # end while\n\t\tres.tmp$b[,1] # b is a matrix\n\t}\n\t\n\tsubset.ok <- function(data, indices) {\n\t\t# Are all the categorical levels present in the subset?\n\t\tdata.subset = data[indices,]\n\t\t\n\t\tfor (mod in mods[[\"categorical\"]]) {\n\t\t\tn.levels <- length(unique(data[[mod]]))\n\t\t\tif (n.levels != cat.mods.level.counts[[mod]]) {\n\t\t\t\treturn(FALSE)\n\t\t\t}\n\t\t}\n\t\treturn(TRUE)\n\t}\n\t\n\t# Run the bootstrap analysis\n\tfailures <- 0\n\tres.boot <- boot(data, statistic=meta.reg.statistic, R=n.replicates)\n\t\n\t### Construct output\n\tcoeff.names <- names(res.boot$t0)\n\tb=res.boot$t0\n\tci.lb <- c()\n\tci.ub <- c()\n\tfor (i in 1:length(res.boot$t0)) {\n\t\tci <- boot.ci(boot.out=res.boot, type=\"norm\", index=i, conf=level/100)# conf. interval\n\t\tci.lb <- c(ci.lb, ci[[\"normal\"]][2])\n\t\tci.ub <- c(ci.ub, ci[[\"normal\"]][3])\n\t}\n\tboot.summary.df <- data.frame(estimate=b, \"Lower Bound\"=ci.lb, \"Upper Bound\"=ci.ub)\n\trownames(boot.summary.df) <- coeff.names\n\t# summary text\n boot.summary.df.rounded <- round(boot.summary.df, digits=digits)\n\tboot.summary.df.rounded.str <- paste(capture.output(boot.summary.df.rounded), collapse=\"\\n\")\n\tsummary.txt <- sprintf(\"# Bootstrap replicates: %d\\n# of failures: %d\\n\\n%s\", n.replicates,failures, boot.summary.df.rounded.str)\n\n\t\n\t# Make histograms\n\txlabels <- coeff.names\n\tpng(file=bootstrap.plot.path, width = 480, height = 480*length(xlabels))\n\tplot.custom.boot(res.boot,\n\t\t\ttitle=as.character(histogram.title),\n\t\t\txlabs=xlabels,\n\t\t\tci.lb=boot.summary.df[[\"Lower Bound\"]],\n\t\t\tci.ub=boot.summary.df[[\"Upper Bound\"]])\n\tgraphics.off()\n\timages <- c(\"Histograms\"=bootstrap.plot.path)\n\n\t# Output results\n\tresults<-list(\n\t\t \"images\"=images,\n\t\t\t\"Bootstrapped Meta Regression Summary\"=summary.txt\n\t\t\t#\"res.boot\"=res.boot\n\t)\n}\n\ng.bootstrap.meta.regression.cond.means <- function(\n data, mods, method, level, digits, strat.cov, cond.means.data,\n n.replicates, histogram.title=\"\", bootstrap.plot.path=\"./r_tmp/bootstrap.png\",\n\tbtt=NULL) {\n\t# Bootstrapped meta-regression Conditional means\n\t# A subset is valid if, for each categorical variable, all the levels are\n\t# preset\n\t\n\tmods.str <- make.mods.str(mods)\n\t\n\t### Generate conditional means\n\tA <- make.design.matrix(strat.cov, mods, cond.means.data, data)\n\n\t###### Bootstrap\n\tmax.failures <- 5*n.replicates # # failures generating test statistic before we give up\n\t# Count number of levels for each categorical covariate\n\tcat.mods.level.counts <- list()\n\tfor (mod in mods[[\"categorical\"]]) 
{\n\t\tn.levels <- length(levels(data[[mod]]))\n\t\tcat.mods.level.counts[[mod]] <- n.levels\n\t}\n\t\n\t# Statistic passed to boot\n\tcond.means.reg.statistic <- function(data, indices) {\n\t\tok = FALSE\n\t\tcat(\"failures: \",failures)\n\t\twhile (!ok) {\n\t\t\tif (failures > max.failures) {\n\t\t\t\tstop(\"Number of failed attempts exceeded 5x the number of replicates\")\n\t\t\t}\n\t\t\tif (!subset.ok(data,indices)) {\n\t\t\t\t# Subset chosen was not ok\n\t\t\t\tfailures <<- failures+1\n\t\t\t\tindices <- sample.int(nrow(data), size=length(indices), replace=TRUE)\n\t\t\t\tcat(\"subset not ok\\n\")\n\t\t\t\tnext\n\t\t\t}\n\t\t\t\n\t\t\tres.tmp <- tryCatch({\n\t\t\t\t\t\tregression.wrapper(data[indices,], mods.str, method, level, digits,btt)\n\t\t\t\t\t}, error = function(e) {\n\t\t\t\t\t\tprint(\"FAILURE FAILURE FAILURE\")\n\t\t\t\t\t\tfailures <<- failures + 1\n\t\t\t\t\t\tindices <- sample.int(nrow(data), size=length(indices), replace=TRUE)\n\t\t\t\t\t\tcat(\"Error in regression wrapper: \",e$message,\"\\n\")\n\t\t\t\t\t\tnext\n\t\t\t\t\t})\n\t\t\t# Everything worked alright\n\t\t\tok <- TRUE\n\t\t} # end while\n\n\t\ttmp.betas <- A %*% res.tmp$b\n\t\ttmp.betas[,1]\n\n\t}\n\t\n\tsubset.ok <- function(data, indices) {\n\t\t# Are all the categorical levels present in the subset?\n\t\tdata.subset = data[indices,]\n\t\t\n\t\tfor (mod in mods[[\"categorical\"]]) {\n\t\t\t# issue #205 (OpenMEE) -- to be changed data to data.subset\n\t\t\t# here to make sure all levels are present in \n\t\t\t# the sample\n\t\t\tn.levels <- length(unique(data.subset[[mod]]))\n\t\t\tif (n.levels != cat.mods.level.counts[[mod]]) {\n\t\t\t\treturn(FALSE)\n\t\t\t}\n\t\t}\n\t\treturn(TRUE)\n\t}\n\t\n\t# Run the bootstrap analysis\n\tfailures <- 0\n\tres.boot <- boot(data, statistic=cond.means.reg.statistic, R=n.replicates)\n\t\n\t### Construct output\n\tcoeff.names <- levels(data[[strat.cov]])\n\tb=res.boot$t0\n\tci.lb <- c()\n\tci.ub <- c()\n\tfor (i in 1:length(res.boot$t0)) {\n\t\tci <- boot.ci(boot.out=res.boot, type=\"norm\", index=i, conf=level/100)# conf. 
interval\n\t\tci.lb <- c(ci.lb, ci[[\"normal\"]][2])\n\t\tci.ub <- c(ci.ub, ci[[\"normal\"]][3])\n\t}\n\tboot.summary.df <- data.frame(cond.mean=b, \"Lower Bound\"=ci.lb, \"Upper Bound\"=ci.ub)\n\trownames(boot.summary.df) <- coeff.names\n\t\n\t### Summary text\n\tboot.summary.df.rounded <- round(boot.summary.df, digits=digits)\n\tboot.summary.df.rounded.str <- paste(capture.output(boot.summary.df.rounded), collapse=\"\\n\")\n\tbootstrap.summary <- sprintf(\"Bootstrap:\\n # Bootstrap replicates: %d\\n # of failures: %d\", n.replicates,failures)\n\t# Conditional means summary\n\tcond.means.data.names <- sort(names(cond.means.data))\n\tcond.means.data.vals <- sapply(cond.means.data.names, function(x) cond.means.data[[x]])\n\tlines = paste(cond.means.data.names, cond.means.data.vals, sep=\": \")\n\tother.vals.str <- paste(lines, sep=\"\\n\")\n\tcond.means.summary <- paste(\"The conditional means are calculated over the levels of: \", strat.cov,\n\t\t\t\"\\nThe other covariates had selected values of:\\n\",\n\t\t\tother.vals.str,sep=\"\")\n\tsummary.txt <- sprintf(\"%s\\n%s\\nResults:\\n%s\", bootstrap.summary, cond.means.summary,boot.summary.df.rounded.str)\n\t\n\t# Make histograms\n\txlabels <- coeff.names\n\tpng(file=bootstrap.plot.path, width = 480, height = 480*length(xlabels))\n\tplot.custom.boot(res.boot,\n\t\t\ttitle=as.character(histogram.title),\n\t\t\txlabs=xlabels,\n\t\t\tci.lb=boot.summary.df[[\"Lower Bound\"]],\n\t\t\tci.ub=boot.summary.df[[\"Upper Bound\"]])\n\tgraphics.off()\n\timages <- c(\"Histograms\"=bootstrap.plot.path)\n\t\n\t# Output results\n\tresults<-list(\n\t\t\t\"images\"=images,\n\t\t\t\"Bootstrapped Conditional Means Meta Regression Summary\"=summary.txt,\n\t\t\t\"res\"=boot.summary.df\n\t)\n}\n\n\nmeta.regression <- function(reg.data, params, cond.means.data=NULL, stop.at.rma=FALSE) {\n\tcov.data <- extract.cov.data(reg.data)\n\tcov.array <- cov.data$cov.array\n\tcat.ref.var.and.levels <- cov.data$cat.ref.var.and.levels\n\n\t# remove when and if method dialog is added\n\tmethod <- as.character(params$rm.method)\n \n\n\t\n\tres<-rma.uni(yi=reg.data@y, sei=reg.data@SE, [email protected],\n\t\t\t\t\tlevel=params$conf.level, digits=params$digits,\n\t\t\t\t\tmethod=method, mods=cov.array)\n\tpure.res<-res\n\t# Used for when we just need the intermediate results (e.g. bootstrapping)\n\tif (stop.at.rma) {\n\t\treturn(res) \n\t}\t\n\t\t\t\t\n# if (class(res)[1] != \"try-error\") {\n display.data <- cov.data$display.data\n reg.disp <- create.regression.display(res, params, display.data)\n \n\t # 1 continuous covariate, no categorical covariates\n if (display.data$n.cont.covs==1 & length(display.data$factor.n.levels)==0) {\n # if only 1 continuous covariate, create reg. 
plot\n betas <- res$b\n fitted.line <- list(intercept=betas[1], slope=betas[2])\n plot.path <- \"./r_tmp/reg.png\"\n plot.data <- create.plot.data.reg(reg.data, params, fitted.line)\n\n # @TODO x and y labels ought to be passed in, probably\n plot.data$xlabel <- reg.data@covariates[[1]]@cov.name\n scale.str <- get.scale(params)\n if ((scale.str==\"standard\") || (scale.str==\"arcsine\")) {\n scale.str <- \"\"\n # This is for the y-axis label on regression plot - don't add \"standard\" or \"arcsine\" to label.\n }\n plot.data$ylabel <- paste(scale.str, \" \", pretty.metric.name(as.character(params$measure)), sep=\"\")\n meta.regression.plot(plot.data, plot.path)\n \n # write the plot data to disk so we can save it\n # @TODO will want to write the params data, too,\n # eventually\n plot.data.path <- save.plot.data(plot.data)\n\n images <- c(\"Regression Plot\"=plot.path)\n plot.names <- c(\"reg.plot\"=\"reg.plot\")\n reg.plot.params.path <- save.plot.data(plot.data)\n plot.params.paths <- c(\"Regression Plot\"=plot.data.path)\n\t\t\tpure.res$weights <- weights(res)\n results <- list(\"images\"=images,\n\t\t\t\t\t \"Summary\"=reg.disp,\n\t\t\t\t\t\t\t\"plot_names\"=plot.names,\n \"plot_params_paths\"=plot.params.paths,\n\t\t\t\t\t\t\t\"res\"=pure.res,\n\t\t\t\t\t\t\t\"res.info\"=rma.uni.value.info())\n\t\t} else if (isnt.null(cond.means.data)) { # Give the conditional means results\n\t\t\tmr.cond.means.disp <- cond_means_display(res, params, display.data, reg.data=reg.data, cat.ref.var.and.levels=cat.ref.var.and.levels, cond.means.data=cond.means.data)\n\t\t\tres.output <- c(pure.res,\n\t\t\t\t\t\t\tlist(Conditional_Means_Section=paste(\"############################\",cond.means.info(cond.means.data), sep=\"\\n\"),\n\t\t\t\t\t\t\t\t Conditional_Means=mr.cond.means.disp))\n\t\t\tres.output.info <- c(rma.uni.value.info(),\n\t\t\t\t\t\t\t\t list(Conditional_Means_Section = list(type=\"vector\", description=\"\"),\n\t\t\t\t\t\t Conditional_Means=list(type=\"blob\", description=\"\")))\n\t\t\tresults <- list(\"Summary\"=reg.disp,\n\t\t\t\t\t\t\t\"Conditional Means\"=mr.cond.means.disp,\n\t\t\t\t\t\t\t\"res\"= res.output,\n\t\t\t\t\t\t\t\"res.info\"= res.output.info\n\t\t\t\t\t\t\t )\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t} else if (display.data$n.cont.covs==0 & length(display.data$factor.n.levels)==1) {\n\t\t\tadj.reg.disp <- adjusted_means_display(res, params, display.data)\n\t\t\tres.output <- c(pure.res,\n\t\t\t\t\t\t\tlist(Adjusted_Means_Section=\"#############################\",\n\t\t\t\t\t\t\t\t Adjusted_Means=adj.reg.disp))\n\t\t\tres.output.info <- c(rma.uni.value.info(),\n\t\t\t\t\t\t\t\t list(Adjusted_Means_Section=list(type=\"vector\", description=\"\"),\n\t\t\t\t\t\t\t\t\t Adjusted_Means=list(type=\"blob\", description=\"\")))\n\t\t\tresults <- list(\"Summary\"=capture.output.and.collapse(reg.disp),\n \"Adjusted Mean\"=capture.output.and.collapse(adj.reg.disp),\n\t\t\t\t\t\t\t\"res\"=res.output,\n\t\t\t\t\t\t\t\"res.info\"=res.output.info)\n\t\t} else {\n\t\t\tresults <- list(\"Summary\"=reg.disp,\n\t\t\t\t\t\t\t\"res\"=pure.res,\n\t\t\t\t\t\t\t\"res.info\"=rma.uni.value.info())\n\t\t}\n\t\n\treferences <- \"Meta Regression: meta regression citation placeholder\"\n\tresults[[\"References\"]] <- references\n results\n}\n\ncond.means.info <- function(cond.means.data) {\n\tblurb <- paste(\"\\nConditional means for '\",as.character(cond.means.data$chosen.cov.name), \"',\\nstratified over its levels given the following values for the other covariates:\\n\", sep=\"\")\n\tfor (name in 
names(cond.means.data)) {\n\t\tif (name != 'chosen.cov.name') {\n\t\t\tblurb <- paste(blurb, name, \" = \", cond.means.data[[name]], \"\\n\", sep=\"\")\n\t\t}\n\t}\n\treturn(blurb)\n}\n\n\nextract.cov.data <- function(reg.data, dont.make.array = FALSE) {\n # separate continuous and factor covariates and extract data.\n # The following are passed to create.regression.display\n n.cont.covs <- 0\n factor.n.levels <- NULL # vector containing number of levels for each factor covariate\n factor.cov.display.col <- NULL\n levels.display.col <- NULL\n studies.display.col <- NULL\n \n # initialize names of continuous covariates to empty list\n cont.cov.names <- c()\n cont.cov.array <- NULL\n factor.cov.array <- NULL\n cat.cov.ref.var.and.levels <- list() #### \n for (n.covs in 1:length(reg.data@covariates)) {\n # put covariate data into two arrays, for continuous and factor covariates.\n cov <- reg.data@covariates[[n.covs]]\n cov.name <- [email protected]\n cov.vals <- [email protected]\n cov.type <- [email protected]\n\t#debug_print <- paste(c(\"Cov name: \", cov.name, \"\\nCov type: \", cov.type,\"\\n\"))\n\t#print(debug_print)\n ref.var <- [email protected]\n if (cov.type==\"continuous\") {\n cov.col <- array(cov.vals, dim=c(length(reg.data@y), 1), \n dimnames=list(NULL, cov.name))\n cont.cov.array <- cbind(cont.cov.array, cov.col)\n cont.cov.names <- c(cont.cov.names, cov.name)\n n.cont.covs <- n.cont.covs + 1\n }\n #factor.cov.array <- NULL # was this causing issue # 222 ?\n if (cov.type==\"factor\") {\n levels <- sort(unique(cov.vals)) # it is actually important for this to be sorted \n # Remove \"\" from levels, if necessary.\n levels.minus.NA <- setdiff(levels, \"\")\n # Levels except for reference variable\n levels.minus.ref.var <- setdiff(levels.minus.NA, ref.var)\n\t \n\t \n cov.cols <- array(dim=c(length(reg.data@y), length(levels.minus.ref.var)))\n studies.col <- c(sum(cov.vals==ref.var))\n for (col.index in 1:length(levels.minus.ref.var)) {\n level <- levels.minus.ref.var[col.index]\n\t\t if (!dont.make.array) {\n cov.cols[cov.vals!=\"\" & cov.vals!=level, col.index] <- 0\n cov.cols[cov.vals!=\"\" & cov.vals==level, col.index] <- 1\n\t }\n studies.col <- c(studies.col, sum(cov.vals==level)) \n }\n factor.cov.array <- cbind(factor.cov.array, cov.cols)\n factor.n.levels <- c(factor.n.levels, length(levels.minus.NA))\n factor.cov.display.col <- c(factor.cov.display.col, cov.name, rep(\"\",length(levels.minus.ref.var)))\n factor.studies.display.col <- c() \n levels.display.col <- c(levels.display.col, ref.var, levels.minus.ref.var)\n studies.display.col <- c(studies.display.col, studies.col)\n\t ref.var.and.levels.in.order <- c(ref.var, levels.minus.ref.var) ####\n\t cat.cov.ref.var.and.levels[[cov.name]] <- ref.var.and.levels.in.order ####\n }\n }\n cov.array <- cbind(cont.cov.array, factor.cov.array)\n cov.display.col <- c(\"Intercept\", cont.cov.names, factor.cov.display.col)\n levels.display.col <- c(rep(\"\",length(cont.cov.names) + 1), levels.display.col)\n studies.display.col <- c(rep(\"\",length(cont.cov.names) + 1), studies.display.col)\n display.data <- list(cov.display.col=cov.display.col, levels.display.col=levels.display.col,\n studies.display.col=studies.display.col, factor.n.levels=factor.n.levels, n.cont.covs=n.cont.covs)\n \n cov.data <- list(cov.array=cov.array, display.data=display.data, cat.ref.var.and.levels=cat.cov.ref.var.and.levels)\n \n}\n\nbinary.fixed.meta.regression <- function(reg.data, params){\n # meta regression for numerical covariates\n cov.data <- 
array(dim=c(length(reg.data@y), length(cov.names)), dimnames=list(NULL, cov.names)) \n for (cov.name in cov.names) {\n # extract matrix of covariates\n cov.val.str <- paste(\"reg.data@covariates$\", cov.name, sep=\"\")\n cov.vals <- eval(parse(text=cov.val.str))\n cov.data[,cov.name] <- cov.vals\n } \n res<-rma.uni(yi=reg.data@y, sei=reg.data@SE, [email protected],\n level=params$conf.level, digits=params$digits, method=\"FE\", \n mods=cov.data)\n reg.disp <- create.regression.disp(res, params, cov.names)\n if (length(cov.names)==1) {\n # if just 1 covariate, create reg. plot\n betas <- res$b\n fitted.line <- list(intercept=betas[1], slope=betas[2])\n plot.path <- \"./r_tmp/reg.png\"\n plot.data <- create.plot.data.reg(reg.data, params, fitted.line, selected.cov=cov.name)\n meta.regression.plot(plot.data, outpath=plot.path, symSize=1,\n lcol = \"darkred\",\n y.axis.label = \"Effect size\",\n xlabel= cov.name,\n lweight = 3,\n lpatern = \"dotted\",\n plotregion = \"n\",\n mcolor = \"darkgreen\",\n regline = TRUE) \n images <- c(\"Regression Plot\"=plot.path)\n plot.names <- c(\"forest plot\"=\"reg.plot\")\n results <- list(\"images\"=images, \"Summary\"=capture.output.and.collapse(reg.disp), \"plot_names\"=plot.names)\n } else {\n results <- list(\"Summary\"=capture.output.and.collapse(reg.disp))\n }\n\n}\n\nrandom.meta.regression <- function(reg.data, params, cov.name){\n cov.val.str <- paste(\"reg.data@covariates$\", cov.name, sep=\"\")\n cov.vals <- eval(parse(text=cov.val.str))\n res<-rma.uni(yi=reg.data@y, sei=reg.data@SE, [email protected],\n level=params$conf.level, digits=params$digits, \n method=params$rm.method, \n mods=cov.vals)\n reg.disp <- create.regression.disp(res, params)\n reg.disp\n betas <- res$b\n fitted.line <- list(intercept=betas[1], slope=betas[2])\n # temporary fix until params$rp_outpath is added to the GUI\n if (is.null(params$rp_outpath)) {\n plot.path <- \"./r_tmp/reg.png\"\n }\n else {\n plot.path <- params$rp_outpath\n }\n plot.data <- create.plot.data.reg(reg.data, params, fitted.line, selected.cov=cov.name)\n meta.regression.plot(plot.data, outpath=plot.path, symSize=1,\n lcol = \"darkred\",\n y.axis.label = \"Effect size\",\n xlabel= cov.name,\n lweight = 3,\n lpatern = \"solid\",\n plotregion = \"n\",\n mcolor = \"black\",\n regline = TRUE) \n images <- c(\"Regression Plot\"=plot.path)\n plot.names <- c(\"forest plot\"=\"reg.plot\")\n results <- list(\"images\"=images, \"Summary\"=capture.output.and.collapse(reg.disp), \"plot_names\"=plot.names)\n results\n}\n\nbinary.random.meta.regression.parameters <- function(){\n # parameters\n rm_method_ls <- c(\"HE\", \"DL\", \"SJ\", \"ML\", \"REML\", \"EB\")\n params <- list(\"rm.method\"=rm_method_ls, \"conf.level\"=\"float\", \"digits\"=\"float\")\n \n # default values\n defaults <- list(\"rm.method\"=\"DL\", \"conf.level\"=95, \"digits\"=3)\n \n var_order <- c(\"rm.method\", \"conf.level\", \"digits\")\n parameters <- list(\"parameters\"=params, \"defaults\"=defaults, \"var_order\"=var_order)\n}\n\ncategorical.meta.regression <- function(reg.data, params, cov.names) {\n # meta-regression for categorical covariates \n cov.data <- array()\n var.names <- NULL\n for (cov.name in cov.names) {\n # extract matrix of covariates\n cov.val.str <- paste(\"reg.data@covariates$\", cov.name, sep=\"\")\n groups <- eval(parse(text=cov.val.str))\n group.list <- unique(groups)\n array.tmp <- array(dim=c(length(reg.data@y), length(group.list)-1), dimnames=list(NULL, group.list[-1]))\n for (group in group.list[-1]) {\n 
array.tmp[,group] <- as.numeric(groups == group)\n }\n if (length(cov.data) > 1) {\n cov.data <- cbind(cov.data, array.tmp)\n } else {\n cov.data <- array.tmp\n }\n }\n res <-rma.uni(yi=reg.data@y, sei=reg.data@SE, [email protected],\n level=params$conf.level, digits=params$digits, method=\"FE\", \n mods=cov.data)\n reg.disp <- create.regression.disp(res, params, cov.names=dimnames(cov.data)[[2]]) \n results <- list(\"Summary\"=reg.disp)\n}" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.3511904776096344, "avg_line_length": 13.272727012634277, "blob_id": "21ebe5057f6b3002ad73627627f690d575a6e522", "content_id": "5fbc7667d6b2931ce8cb723bfe0c62db3d91af9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 168, "license_type": "no_license", "max_line_length": 29, "num_lines": 11, "path": "/src/R/HSROC/R/f.test.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "f.test <-\r\nfunction (x, m1, v1, m2, v2) \r\n{\r\n if (x == 1) {\r\n y = rnorm(1, m1, v1)\r\n }\r\n else {\r\n y = rnorm(1, m2, v2)\r\n }\r\n return(y)\r\n}\r\n" }, { "alpha_fraction": 0.6688879728317261, "alphanum_fraction": 0.686551570892334, "avg_line_length": 56.52886962890625, "blob_id": "66f5507787898c8af58d554ea4e69b0f129ae2ed", "content_id": "31aa32e39a2452454e34f9f3b8f7392ccb798d16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24910, "license_type": "no_license", "max_line_length": 148, "num_lines": 433, "path": "/src/ui_meta.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'meta.ui'\n#\n# Created: Thu May 16 13:55:37 2013\n# by: PyQt4 UI code generator 4.10\n#\n# WARNING! 
All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\nimport ma_data_table_view\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(_fromUtf8(\"MainWindow\"))\n MainWindow.resize(1000, 618)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n MainWindow.setFont(font)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(_fromUtf8(\":/misc/meta.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n MainWindow.setWindowIcon(icon)\n self.centralwidget = QtGui.QWidget(MainWindow)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.centralwidget.setFont(font)\n self.centralwidget.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))\n self.centralwidget.setObjectName(_fromUtf8(\"centralwidget\"))\n self.verticalLayout_3 = QtGui.QVBoxLayout(self.centralwidget)\n self.verticalLayout_3.setSpacing(0)\n self.verticalLayout_3.setContentsMargins(2, -1, 2, 0)\n self.verticalLayout_3.setObjectName(_fromUtf8(\"verticalLayout_3\"))\n self.nav_frame = QtGui.QFrame(self.centralwidget)\n self.nav_frame.setFrameShape(QtGui.QFrame.StyledPanel)\n self.nav_frame.setFrameShadow(QtGui.QFrame.Plain)\n self.nav_frame.setObjectName(_fromUtf8(\"nav_frame\"))\n self.verticalLayout_2 = QtGui.QVBoxLayout(self.nav_frame)\n self.verticalLayout_2.setSpacing(1)\n self.verticalLayout_2.setContentsMargins(0, 1, 0, 0)\n self.verticalLayout_2.setObjectName(_fromUtf8(\"verticalLayout_2\"))\n self.verticalLayout = QtGui.QVBoxLayout()\n self.verticalLayout.setSpacing(0)\n self.verticalLayout.setContentsMargins(-1, -1, -1, 0)\n self.verticalLayout.setObjectName(_fromUtf8(\"verticalLayout\"))\n #self.tableView = QtGui.QTableView(self.nav_frame)\n self.tableView = ma_data_table_view.MADataTable(self.nav_frame)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.tableView.sizePolicy().hasHeightForWidth())\n self.tableView.setSizePolicy(sizePolicy)\n self.tableView.setAlternatingRowColors(True)\n self.tableView.setObjectName(_fromUtf8(\"tableView\"))\n self.verticalLayout.addWidget(self.tableView)\n self.frame = QtGui.QFrame(self.nav_frame)\n self.frame.setMinimumSize(QtCore.QSize(0, 65))\n self.frame.setFrameShape(QtGui.QFrame.StyledPanel)\n self.frame.setFrameShadow(QtGui.QFrame.Raised)\n self.frame.setObjectName(_fromUtf8(\"frame\"))\n self.horizontalLayout = QtGui.QHBoxLayout(self.frame)\n self.horizontalLayout.setSpacing(0)\n self.horizontalLayout.setMargin(0)\n self.horizontalLayout.setObjectName(_fromUtf8(\"horizontalLayout\"))\n self.controlbutton_layot = QtGui.QHBoxLayout()\n self.controlbutton_layot.setSpacing(0)\n self.controlbutton_layot.setObjectName(_fromUtf8(\"controlbutton_layot\"))\n self.frame_2 = QtGui.QFrame(self.frame)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.frame_2.sizePolicy().hasHeightForWidth())\n self.frame_2.setSizePolicy(sizePolicy)\n 
self.frame_2.setMinimumSize(QtCore.QSize(181, 61))\n self.frame_2.setMaximumSize(QtCore.QSize(181, 61))\n self.frame_2.setFrameShape(QtGui.QFrame.StyledPanel)\n self.frame_2.setFrameShadow(QtGui.QFrame.Raised)\n self.frame_2.setObjectName(_fromUtf8(\"frame_2\"))\n self.nav_left_btn = QtGui.QToolButton(self.frame_2)\n self.nav_left_btn.setGeometry(QtCore.QRect(0, 10, 20, 41))\n self.nav_left_btn.setText(_fromUtf8(\"\"))\n icon1 = QtGui.QIcon()\n icon1.addPixmap(QtGui.QPixmap(_fromUtf8(\":/misc/left_arrow.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.nav_left_btn.setIcon(icon1)\n self.nav_left_btn.setIconSize(QtCore.QSize(64, 64))\n self.nav_left_btn.setAutoRaise(True)\n self.nav_left_btn.setObjectName(_fromUtf8(\"nav_left_btn\"))\n self.nav_up_btn = QtGui.QToolButton(self.frame_2)\n self.nav_up_btn.setGeometry(QtCore.QRect(40, 0, 51, 20))\n self.nav_up_btn.setText(_fromUtf8(\"\"))\n icon2 = QtGui.QIcon()\n icon2.addPixmap(QtGui.QPixmap(_fromUtf8(\":/misc/up_arrow.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.nav_up_btn.setIcon(icon2)\n self.nav_up_btn.setIconSize(QtCore.QSize(64, 64))\n self.nav_up_btn.setAutoRaise(True)\n self.nav_up_btn.setObjectName(_fromUtf8(\"nav_up_btn\"))\n self.nav_down_btn = QtGui.QToolButton(self.frame_2)\n self.nav_down_btn.setGeometry(QtCore.QRect(40, 40, 51, 20))\n self.nav_down_btn.setText(_fromUtf8(\"\"))\n icon3 = QtGui.QIcon()\n icon3.addPixmap(QtGui.QPixmap(_fromUtf8(\":/misc/down_arrow.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.nav_down_btn.setIcon(icon3)\n self.nav_down_btn.setIconSize(QtCore.QSize(64, 64))\n self.nav_down_btn.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)\n self.nav_down_btn.setAutoRaise(True)\n self.nav_down_btn.setArrowType(QtCore.Qt.NoArrow)\n self.nav_down_btn.setObjectName(_fromUtf8(\"nav_down_btn\"))\n self.nav_lbl = QtGui.QLabel(self.frame_2)\n self.nav_lbl.setGeometry(QtCore.QRect(20, 20, 91, 21))\n font = QtGui.QFont()\n font.setPointSize(10)\n font.setBold(False)\n font.setWeight(50)\n self.nav_lbl.setFont(font)\n self.nav_lbl.setText(_fromUtf8(\"\"))\n self.nav_lbl.setAlignment(QtCore.Qt.AlignCenter)\n self.nav_lbl.setObjectName(_fromUtf8(\"nav_lbl\"))\n self.nav_right_btn = QtGui.QToolButton(self.frame_2)\n self.nav_right_btn.setGeometry(QtCore.QRect(110, 10, 20, 41))\n self.nav_right_btn.setText(_fromUtf8(\"\"))\n icon4 = QtGui.QIcon()\n icon4.addPixmap(QtGui.QPixmap(_fromUtf8(\":/misc/right_arrow.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.nav_right_btn.setIcon(icon4)\n self.nav_right_btn.setIconSize(QtCore.QSize(64, 64))\n self.nav_right_btn.setAutoRaise(True)\n self.nav_right_btn.setObjectName(_fromUtf8(\"nav_right_btn\"))\n self.nav_add_btn = QtGui.QToolButton(self.frame_2)\n self.nav_add_btn.setGeometry(QtCore.QRect(130, 10, 51, 41))\n self.nav_add_btn.setText(_fromUtf8(\"\"))\n icon5 = QtGui.QIcon()\n icon5.addPixmap(QtGui.QPixmap(_fromUtf8(\":/function_icon_set/function_icon_set/add_48.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.nav_add_btn.setIcon(icon5)\n self.nav_add_btn.setIconSize(QtCore.QSize(64, 64))\n self.nav_add_btn.setAutoRaise(True)\n self.nav_add_btn.setObjectName(_fromUtf8(\"nav_add_btn\"))\n self.controlbutton_layot.addWidget(self.frame_2)\n self.gridLayout = QtGui.QGridLayout()\n self.gridLayout.setObjectName(_fromUtf8(\"gridLayout\"))\n self.label = QtGui.QLabel(self.frame)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n 
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())\n self.label.setSizePolicy(sizePolicy)\n self.label.setMinimumSize(QtCore.QSize(80, 25))\n self.label.setMaximumSize(QtCore.QSize(80, 25))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.label.setFont(font)\n self.label.setObjectName(_fromUtf8(\"label\"))\n self.gridLayout.addWidget(self.label, 0, 0, 1, 1)\n self.label_3 = QtGui.QLabel(self.frame)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.label_3.sizePolicy().hasHeightForWidth())\n self.label_3.setSizePolicy(sizePolicy)\n self.label_3.setMinimumSize(QtCore.QSize(80, 25))\n self.label_3.setMaximumSize(QtCore.QSize(80, 25))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.label_3.setFont(font)\n self.label_3.setObjectName(_fromUtf8(\"label_3\"))\n self.gridLayout.addWidget(self.label_3, 1, 0, 1, 1)\n self.cur_time_lbl = QtGui.QLabel(self.frame)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.cur_time_lbl.sizePolicy().hasHeightForWidth())\n self.cur_time_lbl.setSizePolicy(sizePolicy)\n self.cur_time_lbl.setMinimumSize(QtCore.QSize(80, 25))\n self.cur_time_lbl.setMaximumSize(QtCore.QSize(80, 25))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.cur_time_lbl.setFont(font)\n self.cur_time_lbl.setText(_fromUtf8(\"\"))\n self.cur_time_lbl.setObjectName(_fromUtf8(\"cur_time_lbl\"))\n self.gridLayout.addWidget(self.cur_time_lbl, 1, 1, 1, 1)\n self.cur_outcome_lbl = QtGui.QLabel(self.frame)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.cur_outcome_lbl.sizePolicy().hasHeightForWidth())\n self.cur_outcome_lbl.setSizePolicy(sizePolicy)\n self.cur_outcome_lbl.setMinimumSize(QtCore.QSize(80, 25))\n self.cur_outcome_lbl.setMaximumSize(QtCore.QSize(80, 25))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.cur_outcome_lbl.setFont(font)\n self.cur_outcome_lbl.setText(_fromUtf8(\"\"))\n self.cur_outcome_lbl.setObjectName(_fromUtf8(\"cur_outcome_lbl\"))\n self.gridLayout.addWidget(self.cur_outcome_lbl, 0, 1, 1, 1)\n self.controlbutton_layot.addLayout(self.gridLayout)\n self.horizontalLayout.addLayout(self.controlbutton_layot)\n spacerItem = QtGui.QSpacerItem(396, 18, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.horizontalLayout.addItem(spacerItem)\n self.ci_layot = QtGui.QHBoxLayout()\n self.ci_layot.setObjectName(_fromUtf8(\"ci_layot\"))\n self.horizontalLayout.addLayout(self.ci_layot)\n self.verticalLayout.addWidget(self.frame)\n self.verticalLayout_2.addLayout(self.verticalLayout)\n self.verticalLayout_3.addWidget(self.nav_frame)\n self.dataset_file_lbl = QtGui.QLabel(self.centralwidget)\n self.dataset_file_lbl.setText(_fromUtf8(\"\"))\n self.dataset_file_lbl.setObjectName(_fromUtf8(\"dataset_file_lbl\"))\n self.verticalLayout_3.addWidget(self.dataset_file_lbl)\n MainWindow.setCentralWidget(self.centralwidget)\n self.menu_bar = QtGui.QMenuBar(MainWindow)\n self.menu_bar.setGeometry(QtCore.QRect(0, 0, 1000, 22))\n self.menu_bar.setObjectName(_fromUtf8(\"menu_bar\"))\n self.menu_file = QtGui.QMenu(self.menu_bar)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.menu_file.setFont(font)\n 
self.menu_file.setObjectName(_fromUtf8(\"menu_file\"))\n self.action_open_recent_2 = QtGui.QMenu(self.menu_file)\n self.action_open_recent_2.setObjectName(_fromUtf8(\"action_open_recent_2\"))\n self.menuAnalysis = QtGui.QMenu(self.menu_bar)\n self.menuAnalysis.setObjectName(_fromUtf8(\"menuAnalysis\"))\n self.menuMetric = QtGui.QMenu(self.menuAnalysis)\n self.menuMetric.setObjectName(_fromUtf8(\"menuMetric\"))\n self.menuDataset = QtGui.QMenu(self.menu_bar)\n self.menuDataset.setObjectName(_fromUtf8(\"menuDataset\"))\n self.menuEdit = QtGui.QMenu(self.menu_bar)\n self.menuEdit.setObjectName(_fromUtf8(\"menuEdit\"))\n self.menuHelp = QtGui.QMenu(self.menu_bar)\n self.menuHelp.setObjectName(_fromUtf8(\"menuHelp\"))\n MainWindow.setMenuBar(self.menu_bar)\n self.statusbar = QtGui.QStatusBar(MainWindow)\n self.statusbar.setObjectName(_fromUtf8(\"statusbar\"))\n MainWindow.setStatusBar(self.statusbar)\n self.toolBar = QtGui.QToolBar(MainWindow)\n self.toolBar.setObjectName(_fromUtf8(\"toolBar\"))\n MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)\n self.action_save = QtGui.QAction(MainWindow)\n icon6 = QtGui.QIcon()\n icon6.addPixmap(QtGui.QPixmap(_fromUtf8(\":/function_icon_set/function_icon_set/floppy_disk_48.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.action_save.setIcon(icon6)\n self.action_save.setObjectName(_fromUtf8(\"action_save\"))\n self.action_open = QtGui.QAction(MainWindow)\n icon7 = QtGui.QIcon()\n icon7.addPixmap(QtGui.QPixmap(_fromUtf8(\":/function_icon_set/function_icon_set/folder_48.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.action_open.setIcon(icon7)\n self.action_open.setObjectName(_fromUtf8(\"action_open\"))\n self.action_quit = QtGui.QAction(MainWindow)\n icon8 = QtGui.QIcon()\n icon8.addPixmap(QtGui.QPixmap(_fromUtf8(\":/toolbar-icons/toolbar-icons/quit.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.action_quit.setIcon(icon8)\n self.action_quit.setObjectName(_fromUtf8(\"action_quit\"))\n self.action_go = QtGui.QAction(MainWindow)\n icon9 = QtGui.QIcon()\n icon9.addPixmap(QtGui.QPixmap(_fromUtf8(\":/misc/meta_analysis.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.action_go.setIcon(icon9)\n self.action_go.setObjectName(_fromUtf8(\"action_go\"))\n self.action_edit = QtGui.QAction(MainWindow)\n self.action_edit.setObjectName(_fromUtf8(\"action_edit\"))\n self.action_view_network = QtGui.QAction(MainWindow)\n self.action_view_network.setObjectName(_fromUtf8(\"action_view_network\"))\n self.action_add_covariate = QtGui.QAction(MainWindow)\n self.action_add_covariate.setObjectName(_fromUtf8(\"action_add_covariate\"))\n self.action_cum_ma = QtGui.QAction(MainWindow)\n icon10 = QtGui.QIcon()\n icon10.addPixmap(QtGui.QPixmap(_fromUtf8(\":/misc/cum_meta_analysis.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.action_cum_ma.setIcon(icon10)\n self.action_cum_ma.setObjectName(_fromUtf8(\"action_cum_ma\"))\n self.action_loo_ma = QtGui.QAction(MainWindow)\n icon11 = QtGui.QIcon()\n icon11.addPixmap(QtGui.QPixmap(_fromUtf8(\":/misc/leave_one_out.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.action_loo_ma.setIcon(icon11)\n self.action_loo_ma.setObjectName(_fromUtf8(\"action_loo_ma\"))\n self.actionOR = QtGui.QAction(MainWindow)\n self.actionOR.setObjectName(_fromUtf8(\"actionOR\"))\n self.actionRR = QtGui.QAction(MainWindow)\n self.actionRR.setObjectName(_fromUtf8(\"actionRR\"))\n self.actionTX_Mean_one_arm = QtGui.QAction(MainWindow)\n self.actionTX_Mean_one_arm.setObjectName(_fromUtf8(\"actionTX_Mean_one_arm\"))\n self.actionYo = 
QtGui.QAction(MainWindow)\n self.actionYo.setObjectName(_fromUtf8(\"actionYo\"))\n self.actionNew_dataset = QtGui.QAction(MainWindow)\n self.actionNew_dataset.setObjectName(_fromUtf8(\"actionNew_dataset\"))\n self.action_new_dataset = QtGui.QAction(MainWindow)\n self.action_new_dataset.setIcon(icon5)\n self.action_new_dataset.setObjectName(_fromUtf8(\"action_new_dataset\"))\n self.action_meta_regression = QtGui.QAction(MainWindow)\n icon12 = QtGui.QIcon()\n icon12.addPixmap(QtGui.QPixmap(_fromUtf8(\":/misc/meta_reg.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.action_meta_regression.setIcon(icon12)\n self.action_meta_regression.setObjectName(_fromUtf8(\"action_meta_regression\"))\n self.action_undo = QtGui.QAction(MainWindow)\n icon13 = QtGui.QIcon()\n icon13.addPixmap(QtGui.QPixmap(_fromUtf8(\":/toolbar-icons/toolbar-icons/back.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.action_undo.setIcon(icon13)\n self.action_undo.setObjectName(_fromUtf8(\"action_undo\"))\n self.action_redo = QtGui.QAction(MainWindow)\n icon14 = QtGui.QIcon()\n icon14.addPixmap(QtGui.QPixmap(_fromUtf8(\":/toolbar-icons/toolbar-icons/forward.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.action_redo.setIcon(icon14)\n self.action_redo.setObjectName(_fromUtf8(\"action_redo\"))\n self.action_copy = QtGui.QAction(MainWindow)\n icon15 = QtGui.QIcon()\n icon15.addPixmap(QtGui.QPixmap(_fromUtf8(\":/misc/copy.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.action_copy.setIcon(icon15)\n self.action_copy.setObjectName(_fromUtf8(\"action_copy\"))\n self.action_paste = QtGui.QAction(MainWindow)\n icon16 = QtGui.QIcon()\n icon16.addPixmap(QtGui.QPixmap(_fromUtf8(\":/toolbar-icons/toolbar-icons/paste.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.action_paste.setIcon(icon16)\n self.action_paste.setObjectName(_fromUtf8(\"action_paste\"))\n self.action_subgroup_ma = QtGui.QAction(MainWindow)\n icon17 = QtGui.QIcon()\n icon17.addPixmap(QtGui.QPixmap(_fromUtf8(\":/misc/subgroup_ma.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.action_subgroup_ma.setIcon(icon17)\n self.action_subgroup_ma.setObjectName(_fromUtf8(\"action_subgroup_ma\"))\n self.action_save_as = QtGui.QAction(MainWindow)\n self.action_save_as.setObjectName(_fromUtf8(\"action_save_as\"))\n self.action_open_help = QtGui.QAction(MainWindow)\n self.action_open_help.setObjectName(_fromUtf8(\"action_open_help\"))\n self.action_change_conf_level = QtGui.QAction(MainWindow)\n icon18 = QtGui.QIcon()\n icon18.addPixmap(QtGui.QPixmap(_fromUtf8(\":/misc/ci_icon.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.action_change_conf_level.setIcon(icon18)\n self.action_change_conf_level.setObjectName(_fromUtf8(\"action_change_conf_level\"))\n self.action_import_csv = QtGui.QAction(MainWindow)\n icon19 = QtGui.QIcon()\n icon19.addPixmap(QtGui.QPixmap(_fromUtf8(\":/function_icon_set/function_icon_set/box_download_48.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.action_import_csv.setIcon(icon19)\n self.action_import_csv.setObjectName(_fromUtf8(\"action_import_csv\"))\n self.menu_file.addAction(self.action_save)\n self.menu_file.addAction(self.action_save_as)\n self.menu_file.addAction(self.action_open)\n self.menu_file.addAction(self.action_open_recent_2.menuAction())\n self.menu_file.addAction(self.action_import_csv)\n self.menu_file.addAction(self.action_quit)\n self.menuAnalysis.addAction(self.menuMetric.menuAction())\n self.menuAnalysis.addSeparator()\n self.menuAnalysis.addAction(self.action_go)\n self.menuAnalysis.addSeparator()\n 
self.menuAnalysis.addAction(self.action_cum_ma)\n self.menuAnalysis.addAction(self.action_loo_ma)\n self.menuAnalysis.addAction(self.action_subgroup_ma)\n self.menuAnalysis.addSeparator()\n self.menuAnalysis.addAction(self.action_meta_regression)\n self.menuAnalysis.addAction(self.action_change_conf_level)\n self.menuDataset.addAction(self.action_new_dataset)\n self.menuDataset.addSeparator()\n self.menuDataset.addAction(self.action_edit)\n self.menuDataset.addAction(self.action_view_network)\n self.menuDataset.addAction(self.action_add_covariate)\n self.menuEdit.addAction(self.action_undo)\n self.menuEdit.addAction(self.action_redo)\n self.menuEdit.addAction(self.action_copy)\n self.menuEdit.addAction(self.action_paste)\n self.menuHelp.addAction(self.action_open_help)\n self.menu_bar.addAction(self.menu_file.menuAction())\n self.menu_bar.addAction(self.menuEdit.menuAction())\n self.menu_bar.addAction(self.menuAnalysis.menuAction())\n self.menu_bar.addAction(self.menuDataset.menuAction())\n self.menu_bar.addAction(self.menuHelp.menuAction())\n self.toolBar.addAction(self.action_new_dataset)\n self.toolBar.addAction(self.action_open)\n self.toolBar.addAction(self.action_import_csv)\n self.toolBar.addAction(self.action_save)\n self.toolBar.addSeparator()\n self.toolBar.addAction(self.action_change_conf_level)\n self.toolBar.addAction(self.action_go)\n self.toolBar.addAction(self.action_cum_ma)\n self.toolBar.addAction(self.action_subgroup_ma)\n self.toolBar.addAction(self.action_meta_regression)\n self.toolBar.addAction(self.action_loo_ma)\n self.toolBar.addSeparator()\n self.toolBar.addAction(self.action_undo)\n self.toolBar.addAction(self.action_redo)\n self.toolBar.addSeparator()\n self.toolBar.addAction(self.action_copy)\n self.toolBar.addAction(self.action_paste)\n self.toolBar.addSeparator()\n self.toolBar.addAction(self.action_quit)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"OpenMeta[analyst]\", None))\n self.label.setText(_translate(\"MainWindow\", \"outcome:\", None))\n self.label_3.setText(_translate(\"MainWindow\", \"follow-up:\", None))\n self.menu_file.setTitle(_translate(\"MainWindow\", \"File\", None))\n self.action_open_recent_2.setTitle(_translate(\"MainWindow\", \"open recent...\", None))\n self.menuAnalysis.setTitle(_translate(\"MainWindow\", \"Analysis\", None))\n self.menuMetric.setTitle(_translate(\"MainWindow\", \"metric\", None))\n self.menuDataset.setTitle(_translate(\"MainWindow\", \"Dataset\", None))\n self.menuEdit.setTitle(_translate(\"MainWindow\", \"Edit\", None))\n self.menuHelp.setTitle(_translate(\"MainWindow\", \"Help\", None))\n self.toolBar.setWindowTitle(_translate(\"MainWindow\", \"toolBar\", None))\n self.action_save.setText(_translate(\"MainWindow\", \"save\", None))\n self.action_open.setText(_translate(\"MainWindow\", \"open...\", None))\n self.action_quit.setText(_translate(\"MainWindow\", \"quit\", None))\n self.action_go.setText(_translate(\"MainWindow\", \"meta-analysis...\", None))\n self.action_edit.setText(_translate(\"MainWindow\", \"edit...\", None))\n self.action_view_network.setText(_translate(\"MainWindow\", \"view network...\", None))\n self.action_add_covariate.setText(_translate(\"MainWindow\", \"add covariate...\", None))\n self.action_cum_ma.setText(_translate(\"MainWindow\", \"cumulative meta-analysis...\", None))\n self.action_loo_ma.setText(_translate(\"MainWindow\", \"leave-one-out 
meta-analysis...\", None))\n self.actionOR.setText(_translate(\"MainWindow\", \"OR\", None))\n self.actionRR.setText(_translate(\"MainWindow\", \"RR\", None))\n self.actionTX_Mean_one_arm.setText(_translate(\"MainWindow\", \"TX Mean (one-arm)\", None))\n self.actionYo.setText(_translate(\"MainWindow\", \"yo\", None))\n self.actionNew_dataset.setText(_translate(\"MainWindow\", \"new dataset...\", None))\n self.action_new_dataset.setText(_translate(\"MainWindow\", \"new dataset...\", None))\n self.action_meta_regression.setText(_translate(\"MainWindow\", \"meta-regression\", None))\n self.action_undo.setText(_translate(\"MainWindow\", \"undo (ctrl + z)\", None))\n self.action_redo.setText(_translate(\"MainWindow\", \"redo (ctrl + y)\", None))\n self.action_copy.setText(_translate(\"MainWindow\", \"copy (ctrl + c)\", None))\n self.action_paste.setText(_translate(\"MainWindow\", \"paste (ctrl + v)\", None))\n self.action_subgroup_ma.setText(_translate(\"MainWindow\", \"subgroup meta-analysis\", None))\n self.action_save_as.setText(_translate(\"MainWindow\", \"save as...\", None))\n self.action_open_help.setText(_translate(\"MainWindow\", \"open help\", None))\n self.action_change_conf_level.setText(_translate(\"MainWindow\", \"change confidence level\", None))\n self.action_import_csv.setText(_translate(\"MainWindow\", \"import CSV\", None))\n\nimport forms.icons_rc\n" }, { "alpha_fraction": 0.5355646014213562, "alphanum_fraction": 0.5392162799835205, "avg_line_length": 47.57378005981445, "blob_id": "3e05f956f431e98ab76417ca8591b7a80a26af75", "content_id": "f42780bac868ea20434666d880bdf3cbf873911a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 94476, "license_type": "no_license", "max_line_length": 165, "num_lines": 1945, "path": "/src/ma_data_table_model.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "#########################################################################################\n# #\n# Byron C. Wallace #\n# George Dietz #\n# CEBM @ Brown #\n# OpenMeta[analyst] #\n# --- #\n# Proxy class, interfaces between the underlying representation (in ma_dataset.py) #\n# and the DataTableView UI. Basically deals with keeping track of which outcomes/ #\n# follow-ups/treatments are being viewed. See Summerfield's chapters on M-V-C #\n# in \"Rapid GUI Programming with Python and QT\" for an overview of the architecture. #\n#########################################################################################\n\n# core libraries\nfrom PyQt4.Qt import Qt\nfrom PyQt4 import QtCore\nfrom PyQt4.QtCore import QAbstractTableModel, QModelIndex, QString, QVariant, SIGNAL\nfrom PyQt4.QtGui import QIcon\n\n# home-grown\nfrom ma_dataset import Dataset,Outcome,Study,Covariate\nfrom meta_globals import *\nimport calculator_routines as calc_fncs\nimport meta_py_r\n\n# number of (empty) rows in the spreadsheet to show\n# following the last study.\nDUMMY_ROWS = 20\n\ndef DebugHelper(function):\n def _DebugHelper(*args, **kw):\n print(\"Entered %s\" % function.func_name)\n res = function(*args, **kw)\n print(\"Left %s\" % function.func_name)\n return res\n return _DebugHelper\n\nclass DatasetModel(QAbstractTableModel):\n '''\n This module mediates between the classes comprising a dataset\n (i.e., study & ma_unit objects) and the view. 
In particular, we\n subclass the QAbstractTableModel and provide the fields of interest\n to the view.\n\n Apologies for the mixing of camelCase and lower_case style method\n names; the former are due to the QT framework, but I just couldn't\n bring myself to maintain this blighted style.\n '''\n \n #\n # column indices; these are a core component of this class,\n # as these indices are what maps the UI to the model. The following\n # columns are constant across datatypes, but some (e.g., the \n # columns corresponding to raw data) are variable. see the\n # update_column_indices method for more.\n INCLUDE_STUDY = 0\n NAME, YEAR = [col+1 for col in range(2)]\n \n headers = [\"include\", \"study name\", \"year\"]\n \n \n def __init__(self, filename=QString(), dataset=None, add_blank_study=True):\n super(DatasetModel, self).__init__()\n \n self.conf_level = self.set_conf_level(DEFAULT_CONF_LEVEL)\n\n self.dataset = dataset\n if dataset is None:\n self.dataset = Dataset()\n\n if add_blank_study:\n # include an extra blank study to begin with\n self.dataset.studies.append(Study(self.max_study_id() +1))\n # ... and mark this study as such.\n self.study_auto_added = self.dataset.studies[-1].id\n \n # these variables track which meta-analytic unit,\n # i.e., outcome and time period, are being viewed\n self.current_outcome = None # Current outcome name, not an outcome object # SHOULD BE REFACTORED to self.current_outcome_name to be more accurate\n self.current_time_point = 0\n \n # we also track which groups are being viewed\n self.tx_index_a = 0\n self.tx_index_b = 1\n\n self.update_current_group_names()\n \n print(\"calling update column indices from ma_data_table_model init\")\n self.update_column_indices()\n \n \n # @TODO parameterize; make variable\n self.current_effect = \"OR\" \n\n # @TODO presumably the COVARIATES will contain the column\n # indices and the currently_displayed... will contain the names\n # of the covariates being displayed in said columns, in order\n self.COVARIATES = None\n self.currently_displayed_covariates = []\n\n # @TODO\n self.LABELS = None\n \n\n self.NUM_DIGITS = 3\n self.dirty = False\n\n \n def set_current_metric(self, metric):\n self.current_effect = metric\n print \"OK! 
metric updated.\"\n \n def update_current_outcome(self):\n outcome_names = self.dataset.get_outcome_names()\n ###\n # @TODO we need to maintain a current outcome\n # index here, as we do for groups (below), so that\n # when the user edits the currently displayed outcome,\n # the edited outcome is shown in its place\n self.current_outcome = outcome_names[0] if len(outcome_names)>0 else None\n self.reset()\n \n def update_current_time_points(self):\n if self.current_outcome is not None:\n # note that the user cannot delete all follow-ups; so it's safe to assume this dictionary has \n # at least one entry\n self.current_time_point = self.dataset.outcome_names_to_follow_ups[self.current_outcome].keys()[0]\n else:\n self.current_time_point = 0\n self.reset()\n \n def update_current_group_names(self):\n '''\n This is to be called after the model has been\n edited (via, e.g., the edit_dialog module)\n '''\n group_names = self.dataset.get_group_names() \n n_groups = len(group_names)\n if n_groups > 1:\n # make sure the indices are within range -- the\n # model may have changed without our knowing.\n # may have been nicer to have a notification\n # framework here (i.e., have the underlying model\n # notify us when a group has been deleted) rather\n # than doing it on the fly...\n self.tx_index_a = self.tx_index_a % n_groups\n self.tx_index_b = self.tx_index_b % n_groups\n while self.tx_index_a == self.tx_index_b:\n self._next_group_indices(group_names)\n self.current_txs = [group_names[self.tx_index_a], group_names[self.tx_index_b]]\n else:\n if not self.is_diag():\n #self.current_txs = [\"tx A\", \"tx B\"]\n self.current_txs = DEFAULT_GROUP_NAMES\n else:\n self.current_txs = [\"test 1\"]\n self.previous_txs = self.current_txs\n self.reset()\n \n def update_column_indices(self):\n # Here we update variable column indices, contingent on \n # the type data being displayed, the number of covariates, etc. \n # It is extremely important that these are updated as necessary\n # from the view side of things\n \n #######################################################################\n current_data_type = self.get_current_outcome_type()\n outcome_subtype = self.get_current_outcome_subtype()\n\n self.RAW_DATA, self.OUTCOMES = self.get_column_indices(current_data_type, outcome_subtype)\n \n @staticmethod\n def get_column_indices(data_type, sub_type):\n '''\n Basically to support getting the column indices w/o having to\n instantiate an instance of the class\n '''\n \n raws, outcomes = [],[] # Raw & outcome indices\n \n # offset corresponds to the first three columns, which \n # are include study, name, and year.\n offset = 3\n if data_type == \"binary\":\n raws = [col+offset for col in range(4)]\n outcomes = [7, 8, 9]\n elif data_type == \"continuous\":\n raws = [col+offset for col in range(6)]\n outcomes = [9, 10, 11]\n if sub_type == 'generic_effect': # generic effect and se\n print(\"Detected generic effect outcome in update_column_indices\")\n raws = []\n outcomes = [offset, offset+1] #effect and se\n else: # diagnostic\n raws = [col+offset for col in range(4)]\n outcomes = [7, 8, 9, 10, 11, 12] # sensitivity & specificity\n \n return raws, outcomes\n \n \n def format_float(self, float_var, num_digits=None):\n ''' this method assumes the input can be cast to a float! '''\n float_var = float(float_var)\n precision = num_digits or self.NUM_DIGITS\n formatted_str = \"'%.\" + str(precision) + \"f'\" \n # kind of hacky; I can't find a better way to make the\n # number of digits in the formatting parametric. 
oh well.\n return eval(formatted_str + \"% float_var\")\n\n\n\n def data(self, index, role=Qt.DisplayRole):\n '''\n Implements the required QTTableModel data method. There is a lot of switching on \n role/index/datatype here, but this seems consistent with the QT paradigm (see \n Summerfield's book)\n '''\n\n # number of digits to show in edit mode. this, I think, is enough.\n NUM_DIGITS_PRECISE = 12 \n # by default, we'll use the global NUM_DIGITS; this is the default\n # used by the format_float method unless num_digits is set to \n # something else, i.e., NUM_DIGITS_PRECISE in the case of editing\n num_digits = None\n\n if not index.isValid() or not (0 <= index.row() < len(self.dataset)):\n return QVariant()\n study = self.dataset.studies[index.row()]\n current_data_type = self.dataset.get_outcome_type(self.current_outcome)\n outcome_subtype = self.dataset.get_outcome_subtype(self.current_outcome)\n column = index.column()\n\n\n if role in (Qt.DisplayRole, Qt.EditRole):\n if column == self.NAME:\n return QVariant(study.name)\n elif column == self.YEAR:\n if study.year == 0:\n return QVariant(\"\")\n else:\n return QVariant(study.year)\n elif self.current_outcome is not None and column in self.RAW_DATA:\n adjusted_index = column - 3\n if self.current_outcome in study.outcomes_to_follow_ups:\n ma_unit = self.get_current_ma_unit_for_study(index.row())\n cur_raw_data = ma_unit.get_raw_data_for_groups(self.current_txs) \n if len(cur_raw_data) > adjusted_index:\n val = cur_raw_data[adjusted_index]\n if val == \"\" or val is None:\n return QVariant(val)\n try:\n # these are the continuous columns containing sample\n # size; they will be integers, presumably\n N_columns = (self.RAW_DATA[0], self.RAW_DATA[3])\n\n # issue #31 -- make sure digits are consistent\n if current_data_type == CONTINUOUS and not column in N_columns:\n # issue #151 -- show greater precision on double-click\n if role == Qt.EditRole:\n # then we're editing, so show greater precision\n num_digits = NUM_DIGITS_PRECISE \n return QVariant(str(self.format_float(val, num_digits=num_digits)))\n else:\n return QVariant(round(val, self.NUM_DIGITS))\n except:\n #pyqtRemoveInputHook()\n #pdb.set_trace()\n pass\n else:\n return QVariant(\"\")\n else:\n return QVariant(\"\")\n elif column in self.OUTCOMES:\n # more precision in edit moe -- issue #151\n if role == Qt.EditRole:\n # then we're editing, so show greater precision\n num_digits = NUM_DIGITS_PRECISE \n\n group_str = self.get_cur_group_str()\n # either the point estimate, or the lower/upper\n # confidence interval\n outcome_index = column - self.OUTCOMES[0]\n outcome_val = None\n ma_unit = self.get_current_ma_unit_for_study(index.row())\n \n if not self.is_diag():\n eff,grp = self.current_effect, group_str\n \n if current_data_type == BINARY:\n conv_to_disp_scale = lambda x: meta_py_r.binary_convert_scale(x, eff, convert_to=\"display.scale\")\n elif current_data_type == CONTINUOUS:\n conv_to_disp_scale = lambda x: meta_py_r.continuous_convert_scale(x, eff, convert_to=\"display.scale\")\n \n if current_data_type == CONTINUOUS and outcome_subtype == 'generic_effect':\n d_est_and_se = ma_unit.get_display_effect_and_se(eff, grp, conv_to_disp_scale)\n print(\"DEST AND SE: %s\" % str(d_est_and_se))\n outcome_val = d_est_and_se[outcome_index]\n else: # normal case of no outcome subtype\n d_est_and_ci = ma_unit.get_display_effect_and_ci(eff, grp, conv_to_disp_scale)\n outcome_val = d_est_and_ci[outcome_index]\n \n if outcome_val is None:\n return QVariant(\"\")\n return 
QVariant(self.format_float(outcome_val, num_digits=num_digits)) # issue #31\n else: # This is the diagnostic case\n study_index = index.row()\n # note that we do things quite differently in the diagnostic case,\n # because there is no notion of a 'current effect'. instead,\n # we always show sensitivity and specificity. thus we parse\n # out the estimates and CIs for these manually here.\n m_str = \"Sens\"\n if column in self.OUTCOMES[3:]:\n m_str = \"Spec\"\n \n #est_and_ci = ma_unit.get_effect_and_ci(m_str, group_str)\n #c_val = est_and_ci[outcome_index % 3]\n #outcome_val = meta_py_r.diagnostic_convert_scale(c_val, m_str, convert_to=\"display.scale\") \n \n d_est_and_ci = ma_unit.get_display_effect_and_ci(m_str, group_str)\n outcome_val = d_est_and_ci[outcome_index % 3]\n \n if outcome_val is None:\n return QVariant(\"\")\n \n return QVariant(self.format_float(outcome_val, num_digits=num_digits)) # issue #31\n \n elif column != self.INCLUDE_STUDY and column > max(self.OUTCOMES):\n # here the column is to the right of the outcomes (and not the 0th, or\n # 'include study' column), and thus must correspond to a covariate.\n cov_obj = self.get_cov(column)\n if cov_obj is None:\n return QVariant(\"\")\n \n cov_name = cov_obj.name\n cov_value = study.covariate_dict[cov_name] if \\\n study.covariate_dict.has_key(cov_name) else None\n if cov_value is None:\n cov_value = \"\"\n \n if cov_value != \"\" and cov_obj.data_type == CONTINUOUS:\n return QVariant(self.format_float(cov_value, num_digits=num_digits))\n else:\n # factor\n return QVariant(cov_value)\n elif role == Qt.TextAlignmentRole:\n return QVariant(int(Qt.AlignLeft|Qt.AlignVCenter))\n elif role == Qt.CheckStateRole:\n # this is where we deal with the inclusion/exclusion of studies\n if column == self.INCLUDE_STUDY:\n checked_state = Qt.Unchecked\n if index.row() < self.rowCount()-1 and study.include:\n checked_state = Qt.Checked\n return QVariant(checked_state)\n elif role == Qt.BackgroundColorRole:\n if column in self.OUTCOMES:\n return QVariant(QColor(Qt.yellow))\n elif column in self.RAW_DATA[len(self.RAW_DATA)/2:] and \\\n self.current_effect in ONE_ARM_METRICS:\n return QVariant(QColor(Qt.gray))\n else:\n return QVariant(QColor(Qt.white))\n\n\n def get_cur_group_str(self):\n # we have to build a key (string) here to index into the\n # correct outcome in the meta-analytic unit. the protocol is\n # as follows. 
if we are dealing with a two group outcome,\n # then the string is:\n # tx A-tx B\n # if we have a one group outcome, the string is just:\n # tx A\n if self.current_effect in ONE_ARM_METRICS:\n group_str = self.current_txs[0] \n else:\n group_str = \"-\".join(self.current_txs)\n return group_str\n \n\n def _verify_raw_data(self, s, col, data_type, index_of_s=None):\n # ignore blank entries\n if s.trimmed() == \"\" or s is None:\n return True, None\n\n if not is_a_float(s):\n return False, \"Raw data needs to be numeric.\"\n\n if data_type in (BINARY, DIAGNOSTIC):\n if not is_an_int(s):\n return False, \"Expecting count data -- you provided a float (?)\"\n if int(s) < 0:\n return False, \"Counts cannot be negative.\"\n \n # fix for issue #193\n # do not allow TxA to be greater than N_A, or TxB to be greater than N_B\n msg = \"Number of events cannot be greater than number of samples.\"\n (row,col) = (index_of_s.row(), index_of_s.column())\n if data_type == BINARY:\n if col in [3,5]: # col is TxA or TxB\n N_samples = self.data(self.index(row, col+1)).toString() # string representation of N_samples\n if is_an_int(N_samples):\n if int(s) > int(N_samples): #uh oh\n return False, msg\n elif col in [4,6]: # col is N_A or N_B\n N_events = self.data(self.index(row, col-1)).toString()\n if is_an_int(N_events):\n if int(s) < int(N_events):\n return False, msg\n \n \n if data_type == CONTINUOUS:\n if float(s) <= 0:\n if col in [3,6]:\n return False,\"Count cannot be zero or negative\"\n if col in [5,8]:\n return False,\"Standard Deviation cannot be zero or negative\"\n \n return True, None\n\n\n def _verify_outcome_data(self, s, col, row, data_type):\n outcome_subtype = self.dataset.get_outcome_subtype(self.current_outcome)\n \n if not is_a_float(s):\n return False, \"Outcomes need to be numeric, you crazy person\"\n\n ma_unit = self.get_current_ma_unit_for_study(row)\n group_str = self.get_cur_group_str()\n \n ###binary_display_scale = meta_py_r.binary_convert_scale(x, metric_name, convert_to)\n if self.current_effect == \"PFT\":\n e1, n1, e2, n2 = self.get_cur_raw_data_for_study(study_index=row)\n binary_display_scale = lambda x: meta_py_r.binary_convert_scale(x, self.current_effect, convert_to=\"display.scale\", n1=n1)\n else:\n binary_display_scale = lambda x: meta_py_r.binary_convert_scale(x, self.current_effect, convert_to=\"display.scale\")\n continuous_display_scale = lambda x: meta_py_r.continuous_convert_scale(x, self.current_effect, convert_to=\"display.scale\")\n \n \n if data_type in [BINARY, CONTINUOUS]: \n prev_est, prev_lower, prev_upper = ma_unit.get_effect_and_ci(self.current_effect, group_str, self.get_mult())\n if data_type == BINARY:\n prev_est, prev_lower, prev_upper = [binary_display_scale(x) for x in [prev_est, prev_lower, prev_upper]]\n print(\"Previous binary: %s\" % str([prev_est, prev_lower, prev_upper]))\n elif data_type == CONTINUOUS:\n #prev_est, prev_lower, prev_upper = ma_unit.get_display_effect_and_ci(self.current_effect, group_str)\n prev_est, prev_lower, prev_upper = [continuous_display_scale(x) for x in [prev_est, prev_lower, prev_upper]]\n print(\"Previous continuous: %s\" % str([prev_est, prev_lower, prev_upper]))\n elif data_type == DIAGNOSTIC:\n m_str = \"Sens\" if col in self.OUTCOMES[:3] else \"Spec\"\n #prev_est, prev_lower, prev_upper = ma_unit.get_display_effect_and_ci(m_str, group_str)\n prev_est, prev_lower, prev_upper = ma_unit.get_effect_and_ci(m_str, group_str, self.get_mult())\n prev_est, prev_lower, prev_upper = 
[meta_py_r.diagnostic_convert_scale(x, m_str, convert_to=\"display.scale\") for x in [prev_est, prev_lower, prev_upper]]\n print(\"Previous diagnostic: %s\" % str([prev_est, prev_lower, prev_upper]))\n \n # here we check if there is raw data for this study; \n # if there is, we don't allow entry of outcomes\n raw_data = self.get_cur_raw_data_for_study(row)\n \n if not all([is_empty(s_i) for s_i in raw_data]):\n # fix for issue #180 \n # sort of hacky. we check here to see if the outcome\n # in fact was \"changed\", by which we mean the value\n # has been set to a 'sufficiently' different\n # value. this avoids the UI annoyingly bugging users when\n # they are tabbing along. probably a better fix would\n # be to modify the actual tabbing behavior of the spreadsheet\n # for the last 'raw data' column.\n d = dict(zip(self.OUTCOMES, [prev_est, prev_lower, prev_upper]))\n new_val = float(s)\n previously_was_none = d[col] is None\n delta = None\n if previously_was_none:\n # then it was previously not set;\n # go ahead and let the user override.\n delta = float(\"-inf\")\n else:\n delta = abs(new_val - d[col])\n print \"new val {0}, prev val {1}\".format(new_val, d[col])\n print \"DELTA {0}\".format(delta)\n epsilon = 10E-6 \n if delta > epsilon:\n return False, '''You have already entered raw data for this study. If you want to enter the outcome directly, delete the raw data first.'''\n\n if s.trimmed() == '':\n # in this case, they've deleted a value\n # (i.e., left it blank) -- this is OK.\n return True, None \n if self.current_effect in (\"OR\", \"RR\"):\n if float(s) < 0:\n return False, \"Ratios cannot be negative.\"\n \n #figure out why type of column we are in\n fields = [\"est\",\"lower\",\"upper\"]\n if data_type == DIAGNOSTIC:\n fields.extend(fields[:])\n col_to_type = dict(zip(self.OUTCOMES,fields))\n val_str = col_to_type[col]\n \n if outcome_subtype == \"generic_effect\":\n if col == self.OUTCOMES[1]: # se column\n if float(s) < 0:\n return False, \"Standard Error cannot be negative\"\n else:\n def is_between_bounds(est=prev_est, low=prev_lower, high=prev_upper):\n return calc_fncs.between_bounds(est=est, low=low, high=high)\n good_result = None\n if val_str == \"est\":\n (good_result,msg) = is_between_bounds(est=float(s))\n elif val_str == \"lower\":\n (good_result,msg) = is_between_bounds(low=float(s))\n elif val_str == \"upper\":\n (good_result,msg) = is_between_bounds(high=float(s))\n assert not good_result is None, \"Why don't we have a result for what outcome we're in?\"\n if not good_result:\n return False, msg\n\n return True, None\n\n def _verify_year(self, s):\n if s.trimmed() == '':\n return True, None\n\n if not is_an_int(s):\n return False, \"Years need to be integers.\"\n\n return True, None\n\n\n def setData(self, index, value, role=Qt.EditRole, import_csv=False, allow_empty_names=False):\n '''\n Implementation of the AbstractDataTable method. The view uses this method\n to request data to display. 
Thus we here return values to render in the table\n based on the index (row, column).\n\n For more, see: http://doc.trolltech.com/4.5/qabstracttablemodel.html\n '''\n group_str = self.get_cur_group_str()\n study_added_due_to_edit = None\n if index.isValid() and 0 <= index.row() < len(self.dataset):\n current_data_type = self.dataset.get_outcome_type(self.current_outcome)\n outcome_subtype = self.dataset.get_outcome_subtype(self.current_outcome)\n column = index.column()\n old_val = self.data(index)\n study = self.dataset.studies[index.row()]\n else:\n return False\n \n if column == self.NAME:\n # proposed study name\n name = unicode(value.toString().toUtf8(), encoding=\"utf8\")\n \n if name == \"\" and not allow_empty_names:\n # just ignore -- we don't allow empty study names\n return False\n # if we already have the name and the name is not just the current name\n if name in self.dataset.get_study_names() and name != study.name:\n msg = \"Duplicate study names not allowed\"\n self.emit(SIGNAL(\"dataError(QString)\"), QString(msg))\n return False\n # the second clause here is to address issue #233,\n # specifically we do not add a dummy study if the \n # current study (as indexed by index.row()) is 'blank'.\n # note that for us to have even gotten this far in such\n # a case means the allow_empty_names flag is True.\n elif index.row() == self.rowCount()-DUMMY_ROWS-1 and not name==\"\":\n # if the last study was just edited, append a\n # new, blank study\n # TODO bug: if a new tx group is added, and then a new study\n # is added, the program throws up because the study doesn't have\n # the new outcome in its meta-analytic unit object -- need to check\n # for this at runtime as we do with follow-up and outcome\n new_study = Study(self.max_study_id()+1)\n # issue #133 fix; exclude newly added studies by default\n new_study.include=False\n self.dataset.add_study(new_study)\n self.study_auto_added = int(new_study.id)\n study_added_due_to_edit = int(new_study.id)\n self.reset()\n # new_index is where the user *should* be editing.\n new_index = self.index(index.row(), index.column()+1)\n self.emit(SIGNAL(\"modelReset(QModelIndex)\"), new_index)\n \n # study name is good to go\n study.name = unicode(value.toString().toUtf8(), encoding=\"utf8\")\n \n elif column == self.YEAR:\n year_ok, msg = self._verify_year(value.toString())\n if not year_ok:\n self.emit(SIGNAL(\"dataError(QString)\"), QString(msg))\n return False\n study.year = value.toInt()[0]\n elif self.current_outcome is not None and column in self.RAW_DATA:\n data_ok, msg = self._verify_raw_data(value.toString(), column, current_data_type, index)\n if not data_ok:\n # this signal is (-- presumably --) handled by the UI\n # i.e., meta_form, which reports the problem to the\n # user. 
the model is not affected.\n self.emit(SIGNAL(\"dataError(QString)\"), QString(msg))\n return False\n\n # @TODO make module-level constant?\n adjust_by = 3 # include study, study name, year columns\n ma_unit = self.get_current_ma_unit_for_study(index.row())\n group_name = self.current_txs[0]\n if current_data_type == BINARY:\n if column in self.RAW_DATA[2:]:\n adjust_by += 2 \n group_name = self.current_txs[1]\n elif current_data_type == CONTINUOUS:\n if column in self.RAW_DATA[3:]:\n adjust_by += 3\n group_name = self.current_txs[1]\n else:\n # diagnostic\n pass\n \n adjusted_index = column-adjust_by\n val = value.toDouble()[0] if value.toDouble()[1] else \"\"\n ma_unit.tx_groups[group_name].raw_data[adjusted_index] = val\n \n # If a raw data column value is being edited, attempt to\n # update the corresponding outcome (if data permits)\n self.update_outcome_if_possible(index.row())\n \n \n elif column in self.OUTCOMES:\n print(\"Value %s in outcomes\" % str(value.toString()))\n \n row = index.row()\n \n if value.toString().trimmed() == \"\":\n delete_value = True \n display_scale_val = None\n calc_scale_val = None\n else:\n # sanity check -- is this a number?\n data_ok, msg = self._verify_outcome_data(value.toString(), column, row, current_data_type)\n if not data_ok and import_csv == False:\n self.emit(SIGNAL(\"dataError(QString)\"), QString(msg))\n return False\n\n # the user can also explicitly set the effect size / CIs\n # @TODO what to do if the entered estimate contradicts the raw data?\n display_scale_val, converted_ok = value.toDouble()\n \n print(\"Display scale value: %s\" % str(display_scale_val))\n\n if display_scale_val is None or converted_ok:\n if not self.is_diag():\n # note that we convert from the display/continuous\n # scale on which the metric is assumed to have been\n # entered into the 'calculation' scale (e.g., log)\n calc_scale_val = None\n print(\"Input value is %s\" % str(display_scale_val))\n\n if self.current_effect == \"PFT\":\n e1, n1, e2, n2 = self.get_cur_raw_data_for_study(study_index=row)\n calc_scale_val = self._get_calc_scale_value(display_scale_val,\n data_type=current_data_type,\n effect=self.current_effect,n1=n1)\n conv_to_disp_scale = self._get_conv_to_display_scale(data_type=current_data_type,\n effect=self.current_effect, n1=n1)\n else:\n calc_scale_val = self._get_calc_scale_value(display_scale_val,\n data_type=current_data_type,\n effect=self.current_effect)\n conv_to_disp_scale = self._get_conv_to_display_scale(data_type=current_data_type,\n effect=self.current_effect)\n \n ma_unit = self.get_current_ma_unit_for_study(index.row())\n if outcome_subtype == \"generic_effect\":\n if column == self.OUTCOMES[0]: #estimate\n ma_unit.set_effect(self.current_effect, group_str, calc_scale_val)\n #ma_unit.set_display_effect(self.current_effect, group_str, display_scale_val)\n elif column == self.OUTCOMES[1]: # se\n ma_unit.set_SE(self.current_effect, group_str, calc_scale_val)\n #ma_unit.set_display_se(self.current_effect, group_str, display_scale_val)\n else: # normal case\n if column == self.OUTCOMES[0]: # estimate\n print(\"Setting estimate: %s\" % str(calc_scale_val))\n ma_unit.set_effect(self.current_effect, group_str, calc_scale_val)\n #ma_unit.set_display_effect(self.current_effect, group_str, display_scale_val)\n elif column == self.OUTCOMES[1]: #lower\n ma_unit.set_lower(self.current_effect, group_str, calc_scale_val)\n #ma_unit.set_display_lower(self.current_effect, group_str, display_scale_val)\n else: #upper\n 
ma_unit.set_upper(self.current_effect, group_str, calc_scale_val)\n #ma_unit.set_display_upper(self.current_effect, group_str, display_scale_val)\n print(\"calculating se\")\n \n # in normal case, only calculate SE when all data is filled in\n if None not in ma_unit.get_entered_effect_and_ci(self.current_effect, group_str): \n se = ma_unit.calculate_SE_if_possible(self.current_effect, group_str, mult=self.mult)\n print(\"setting se to %s\" % str(se))\n else:\n se = None\n ma_unit.set_SE(self.current_effect, group_str, se)\n \n # Now calculate display_effect and CI\n ma_unit.calculate_display_effect_and_ci(\n self.current_effect, group_str,\n conv_to_disp_scale,\n conf_level=self.get_global_conf_level(),\n mult=self.mult)\n\n \n else: #outcome is diagnostic\n ma_unit = self.get_current_ma_unit_for_study(index.row())\n # figure out if this column is sensitivity or specificity\n m_str = \"Sens\"\n if column in self.OUTCOMES[3:]:\n # by convention, the last three columns are specificity\n m_str = \"Spec\"\n \n calc_scale_val = self._get_calc_scale_value(display_scale_val,\n data_type=current_data_type,\n effect=m_str)\n \n # now we switch on what outcome column we're on ... kind of ugly, but eh.\n if column in (self.OUTCOMES[0], self.OUTCOMES[3]):\n ma_unit.set_effect(m_str, group_str, calc_scale_val)\n #ma_unit.set_display_effect(m_str, group_str, display_scale_val)\n elif column in (self.OUTCOMES[1], self.OUTCOMES[4]):\n ma_unit.set_lower(m_str, group_str, calc_scale_val)\n #ma_unit.set_display_lower(m_str, group_str, display_scale_val) \n else:\n ma_unit.set_upper(m_str, group_str, calc_scale_val)\n #ma_unit.set_display_upper(m_str, group_str, display_scale_val)\n conv_to_display_scale = self._get_conv_to_display_scale(self,\n data_type=current_data_type,\n effect=m_str)\n ma_unit.calculate_display_effect_and_ci(\n m_str, group_str, conv_to_disp_scale,\n conf_level=self.get_global_conf_level(),\n mult=self.mult)\n \n elif column == self.INCLUDE_STUDY:\n study.include = value.toBool()\n # we keep note if a study was manually \n # excluded; this differs from just being\n # `included' because the latter is TRUE\n # automatically when a study first acquires\n # sufficient data to be included in an MA\n if not value.toBool():\n study.manually_excluded = True\n else:\n study.manually_excluded = False\n else:\n # then a covariate value has been edited.\n cov = self.get_cov(column)\n cov_name = cov.name\n new_value = None\n if cov.data_type == FACTOR:\n new_value = value.toString()\n else:\n # continuous\n new_value, converted_ok = value.toDouble()\n if not converted_ok: \n new_value = None\n study.covariate_dict[cov_name] = new_value\n \n self.emit(SIGNAL(\"dataChanged(QModelIndex, QModelIndex)\"), index, index)\n\n # tell the view that an entry in the table has changed, and what the old\n # and new values were. This for undo/redo purposes.\n new_val = self.data(index)\n\n self.emit(SIGNAL(\"pyCellContentChanged(PyQt_PyObject, PyQt_PyObject, PyQt_PyObject, PyQt_PyObject)\"), \n index, old_val, new_val, study_added_due_to_edit)\n \n if not self.is_diag():\n group_str = self.get_cur_group_str()\n\n print group_str\n print \"ok checking it; cur outcome: %s. 
cur group: %s\" % (self.current_outcome, group_str)\n if self.current_outcome is not None:\n effect_d = self.get_current_ma_unit_for_study(index.row()).effects_dict[self.current_effect][group_str]\n print effect_d\n \n \n # if the study has not been explicitly excluded by the user, then we automatically\n # include it once it has sufficient data.\n if not study.manually_excluded:\n study.include = True\n \n if current_data_type == CONTINUOUS and outcome_subtype == \"generic_effect\":\n if None in [effect_d[key] for key in [\"est\",\"SE\"]]:\n study.include = False\n else: # normal case, binary or continuous\n # if any of the effect values are empty, we cannot include this study in the analysis, so it\n # is automatically excluded.\n if any([val is None for val in [effect_d[effect_key] for effect_key in (\"upper\", \"lower\", \"est\")]]):\n study.include = False\n return True\n \n \n @staticmethod\n def helper_basic_horizontal_headerData(section, data_type, sub_type,\n raw_columns, outcome_columns, current_effect, groups, outcome_not_None=True):\n ''' Allows access to basic display role headerData information w/o\n having to make a data model '''\n \n if section == DatasetModel.INCLUDE_STUDY:\n return QVariant(DatasetModel.headers[DatasetModel.INCLUDE_STUDY])\n elif section == DatasetModel.NAME:\n return QVariant(DatasetModel.headers[DatasetModel.NAME])\n elif section == DatasetModel.YEAR:\n return QVariant(DatasetModel.headers[DatasetModel.YEAR])\n # note that we're assuming here that raw data\n # always shows only two tx groups at once.\n elif outcome_not_None and section in raw_columns:\n # switch on the outcome type \n current_tx = groups[0] # i.e., the first group\n if data_type == BINARY:\n if section in raw_columns[2:]:\n current_tx = groups[1]\n \n if section in (raw_columns[0], raw_columns[2]):\n return QVariant(current_tx + \" #evts\")\n else:\n return QVariant(current_tx + \" #total\")\n elif data_type == CONTINUOUS:\n # continuous data\n if len(raw_columns) < 6:\n return QVariant(\"\")\n \n if sub_type == \"generic_effect\":\n return QVariant(\"\")\n else:\n if section in raw_columns[3:]:\n current_tx = groups[1]\n if section in (raw_columns[0], raw_columns[3]):\n return QVariant(current_tx + \" N\")\n elif section in (raw_columns[1], raw_columns[4]):\n return QVariant(current_tx + \" mean\")\n else:\n return QVariant(current_tx + \" SD\")\n elif data_type == DIAGNOSTIC:\n # ordering per sir Tom Trikalinos\n # \"it makes sense -- it goes like this in the matrix!\"\n # - (said while making bizarre gesticulation) Tom.\n if section == raw_columns[0]:\n return QVariant(\"TP\")\n elif section == raw_columns[1]:\n return QVariant(\"FN\")\n elif section == raw_columns[2]:\n return QVariant(\"FP\")\n else:\n return QVariant(\"TN\")\n \n elif section in outcome_columns:\n if data_type == BINARY:\n # effect size, lower CI, upper CI\n if section == outcome_columns[0]:\n return QVariant(current_effect)\n elif section == outcome_columns[1]:\n return QVariant(\"lower\")\n else:\n return QVariant(\"upper\")\n elif data_type == CONTINUOUS:\n if sub_type == \"generic_effect\":\n if section == outcome_columns[0]:\n return QVariant(current_effect)\n if section == outcome_columns[1]:\n return QVariant(\"se\")\n else: # normal case with no outcome_subtype\n if section == outcome_columns[0]:\n return QVariant(current_effect)\n elif section == outcome_columns[1]:\n return QVariant(\"lower\")\n elif section == outcome_columns[2]:\n return QVariant(\"upper\")\n elif data_type == DIAGNOSTIC:\n #### \n # 
we're going to do three columns per outcome\n # est, lower, upper\n outcome_index = section - outcome_columns[0]\n outcome_headers = [\"sens.\", \"lower\", \"upper\", \"spec.\", \"lower\", \"upper\"]\n return QVariant(outcome_headers[outcome_index])\n \n return None # Only get here if section doesn't match\n \n\n \n\n def headerData(self, section, orientation, role=Qt.DisplayRole):\n '''\n Implementation of the abstract method inherited from the base table\n model class. This is responsible for providing header data for the\n respective columns.\n '''\n outcome_type = self.dataset.get_outcome_type(self.current_outcome)\n outcome_subtype = self.dataset.get_outcome_subtype(self.current_outcome)\n length_dataset = len(self.dataset)\n \n sectionOK = section < length_dataset\n ############################### TOOLTIPS ###############################\n if role == Qt.ToolTipRole:\n if orientation == QtCore.Qt.Horizontal:\n #return QtCore.QString(\"Horizontal Header %s Tooltip\" % str(section))\n if section == self.INCLUDE_STUDY:\n return QString(\"Check if you want to include this study in the meta-analysis\")\n elif section == self.NAME:\n return QString(\"Name to identify the study\")\n elif section == self.YEAR:\n return QString(\"Year of publication\")\n elif self.current_outcome is not None and section in self.RAW_DATA:\n # switch on the outcome type \n current_tx = self.current_txs[0] # i.e., the first group\n \n rename_col_msg = \"\\nRename group by right-clicking the column header and selecting 'rename group...'\"\n sort_msg = \"\\nSort on this column by right-clicking the column header and selecting 'sort studies...'\"\n if outcome_type == BINARY:\n if section in self.RAW_DATA[2:]:\n current_tx = self.current_txs[1]\n \n if section in (self.RAW_DATA[0], self.RAW_DATA[2]):\n num_events_msg = \"# of Events in group {0} (numerator)\".format(current_tx)\n return QString(num_events_msg + rename_col_msg + sort_msg)\n else:\n num_sujets_msg = \"# of Subjects in group {0} (numerator)\".format(current_tx)\n return QString(num_sujets_msg + rename_col_msg + sort_msg)\n elif outcome_type == CONTINUOUS:\n # continuous data\n if outcome_subtype == \"generic_effect\":\n # Logic note: we should never reach this point\n return QString(\"leave me alone!\")\n \n else: # normal case with no outcome subtype\n if section in self.RAW_DATA[3:]:\n current_tx = self.current_txs[1]\n \n if section in (self.RAW_DATA[0], self.RAW_DATA[3]):\n N_sujets_msg = \"# Subjects in group {0}\".format(current_tx)\n return QString(N_sujets_msg + rename_col_msg + sort_msg)\n elif section in (self.RAW_DATA[1], self.RAW_DATA[4]):\n mean_msg = \"Mean of group %s\" % current_tx\n return QString(mean_msg + rename_col_msg + sort_msg)\n else:\n sd_msg = \"Standard Deviation of group %s\" % current_tx\n return QString(sd_msg)\n elif outcome_type == DIAGNOSTIC:\n if section == self.RAW_DATA[0]:\n return QString(\"# True Positives\" + sort_msg)\n elif section == self.RAW_DATA[1]:\n return QString(\"# False Negatives\" + sort_msg)\n elif section == self.RAW_DATA[2]:\n return QString(\"# False Positives\" + sort_msg)\n else:\n return QString(\"# True Negatives\" + sort_msg)\n elif section in self.OUTCOMES:\n help_msg = \"For information about how the confidence interval was obtained,\\n\"\n help_msg += \"please consult the the help at {0}\".format(HELP_URL)\n lower_msg = \"Lower bound of {0:.1%} confidence interval\".format(self.conf_level/100.0)\n lower_msg += \"\\n\" + help_msg\n upper_msg = \"Upper bound of {0:.1%} confidence 
interval\\n\".format(self.conf_level/100.0)\n upper_msg += \"\\n\" + help_msg\n se_msg = \"Standard Error\"\n \n if outcome_type == BINARY:\n # effect size, lower CI, upper CI\n if section == self.OUTCOMES[0]:\n return QString(BINARY_METRIC_NAMES[self.current_effect])\n elif section == self.OUTCOMES[1]:\n return QString(lower_msg)\n else:\n return QString(upper_msg)\n elif outcome_type == CONTINUOUS:\n if outcome_subtype == \"generic_effect\":\n if section == self.OUTCOMES[0]:\n return QString(CONTINUOUS_METRIC_NAMES[self.current_effect])\n if section == self.OUTCOMES[1]:\n return QString(se_msg)\n else: # normal case with no outcome_subtype\n if section == self.OUTCOMES[0]:\n return QString(CONTINUOUS_METRIC_NAMES[self.current_effect])\n elif section == self.OUTCOMES[1]:\n return QString(lower_msg)\n elif section == self.OUTCOMES[2]:\n return QString(upper_msg)\n \n \n elif outcome_type == DIAGNOSTIC:\n if section in (self.OUTCOMES[1],self.OUTCOMES[4]):\n return QString(lower_msg)\n elif section in (self.OUTCOMES[2],self.OUTCOMES[5]):\n return QString(upper_msg)\n else: # in metric name\n if section == self.OUTCOMES[0]: # Sens\n return QString(DIAGNOSTIC_METRIC_NAMES[\"Sens\"])\n elif section == self.OUTCOMES[3]: # Spec\n return QString(DIAGNOSTIC_METRIC_NAMES[\"Spec\"])\n \n else: # vertical\n if sectionOK:\n return QtCore.QString(\"Use calculator to fill-in missing information\")\n \n # For cool calculator icon\n if role == Qt.DecorationRole:\n if orientation == Qt.Vertical:\n if sectionOK:\n return QIcon(\":/misc/calculator-34.png\")\n else:\n #print \"\\n\\n----\\n\\n\"\n #print section\n #print len(self.dataset)\n #print self.dataset.studies\n #print self.dataset.get_study_names()\n #pyqtRemoveInputHook()\n #pdb.set_trace()\n return QVariant()\n\n \n if role == Qt.TextAlignmentRole:\n return QVariant(int(Qt.AlignLeft|Qt.AlignVCenter))\n\n ############################# DISPLAY ROLE #############################\n if role == Qt.DisplayRole:\n if orientation == Qt.Horizontal:\n \n res = self.helper_basic_horizontal_headerData(\n section,\n data_type=outcome_type,\n sub_type=outcome_subtype,\n raw_columns=self.RAW_DATA,\n outcome_columns=self.OUTCOMES,\n current_effect=self.current_effect,\n groups=self.current_txs,\n outcome_not_None=self.current_outcome is not None)\n if res:\n return res \n elif self.current_outcome is not None and section > max(self.OUTCOMES):\n # then the column is to the right of the outcomes, and must\n # be a covariate.\n ### issue #156 -- always show covariate type\n cur_cov = self.get_cov(section)\n if cur_cov == None:\n return QVariant(\"\")\n \n cov_name = cur_cov.name\n cov_type = cur_cov.get_type_str()\n # note that I'm only returning the *first* letter\n # of the type (c or f) because the whole thing\n # is too long..\n return QVariant(\"%s (%s)\" % (cov_name, cov_type[0]))\n else:\n # pass, basically\n return QVariant(\"\")\n else: # vertical case\n # this is the vertical -- non-table header -- case. 
\n # we just show row numbers (not zero-based; hence the +1).\n return QVariant(int(section+1))\n \n\n \n \n\n\n def flags(self, index):\n if not index.isValid():\n return Qt.ItemIsEnabled\n elif index.column() == self.INCLUDE_STUDY:\n return Qt.ItemFlags(Qt.ItemIsUserCheckable | Qt.ItemIsEnabled |\n Qt.ItemIsUserCheckable | Qt.ItemIsSelectable)\n return Qt.ItemFlags(QAbstractTableModel.flags(self, index)|\n Qt.ItemIsEditable)\n\n def rowCount(self, index=QModelIndex()):\n return self.dataset.num_studies() + DUMMY_ROWS\n\n def columnCount(self, index=QModelIndex()):\n return self._get_col_count()\n\n def get_cov(self, table_col_index):\n # we map (ie, adjust) the table column index to the covariate\n # index. if there is currently an outcome, this means we\n # subtract off the indices up to the last outcomes column; otherwise\n # we just subtract the include, study name and year columns (giving 3)\n cov_index = table_col_index - (self.OUTCOMES[-1]+1) if self.current_outcome is not None else table_col_index - 3\n try:\n return self.dataset.covariates[cov_index]\n except:\n print(\"There is no covariate at that index\")\n return None\n \n def get_covariate_names(self):\n return [cov.name for cov in self.dataset.covariates]\n\n def rename_covariate(self, old_cov_name, new_cov_name):\n old_cov_obj = self.dataset.get_cov_obj_from_name(old_cov_name)\n self.dataset.change_covariate_name(old_cov_obj, new_cov_name)\n self.reset()\n\n def _get_col_count(self):\n '''\n Calculate how many columns to display; this is contingent on the data type,\n amongst other things (e.g., number of covariates).\n '''\n num_cols = 3 # we always show study name and year (and include studies)\n if len(self.dataset.get_outcome_names()) > 0:\n num_effect_size_fields = 3 # point estimate, low, high\n outcome_type = self.dataset.get_outcome_type(self.current_outcome)\n outcome_subtype = self.dataset.get_outcome_subtype(self.current_outcome)\n if outcome_subtype == \"generic_effect\":\n num_effect_size_fields = 2 # point estimate, se\n if outcome_type == DIAGNOSTIC:\n # we have two for diagnostic; sensitivity and specifity.\n # we will display the est, lower, and upper for both of these.\n num_effect_size_fields = 6\n \n num_cols += num_effect_size_fields + self.num_data_cols_for_current_unit()\n # now add the covariates (if any)\n num_cols += len(self.dataset.covariates)\n return num_cols\n \n def get_ordered_study_ids(self):\n return [study.id for study in self.dataset.studies]\n\n def add_new_outcome(self, name, data_type, sub_type=None):\n data_type = STR_TO_TYPE_DICT[data_type.lower()]\n self.dataset.add_outcome(Outcome(name, data_type, sub_type=sub_type))\n\n def remove_outcome(self, outcome_name):\n self.dataset.remove_outcome(outcome_name)\n \n def add_new_group(self, name):\n self.dataset.add_group(name, self.current_outcome)\n \n def remove_group(self, group_name):\n self.dataset.remove_group(group_name)\n \n def rename_group(self, old_group_name, new_group_name):\n self.dataset.change_group_name(old_group_name, new_group_name)\n if old_group_name in self.current_txs:\n group_index = self.current_txs.index(old_group_name)\n # now remove the old group from the list of current groups\n self.current_txs.pop(group_index)\n self.current_txs.insert(group_index, new_group_name)\n self.reset()\n\n def add_follow_up_to_current_outcome(self, follow_up_name):\n self.dataset.add_follow_up_to_outcome(self.current_outcome, follow_up_name)\n \n def remove_follow_up_from_outcome(self, follow_up_name, outcome_name):\n 
self.dataset.remove_follow_up_from_outcome(follow_up_name, outcome_name)\n \n def add_covariate(self, covariate_name, covariate_type, cov_values=None):\n self.dataset.add_covariate(Covariate(covariate_name, covariate_type),\n cov_values=cov_values)\n self.reset()\n \n def remove_covariate(self, covariate_name):\n self.dataset.remove_covariate(covariate_name)\n self.reset()\n \n def remove_study(self, an_id):\n self.dataset.studies.pop(an_id)\n self.reset()\n\n def get_name(self):\n return self.dataset.title\n\n def get_next_outcome_name(self):\n outcomes = self.dataset.get_outcome_names()\n cur_index = outcomes.index(self.current_outcome)\n next_outcome = outcomes[0] if cur_index == len(outcomes)-1\\\n else outcomes[cur_index+1]\n return next_outcome\n\n def get_prev_outcome_name(self):\n outcomes = self.dataset.get_outcome_names()\n cur_index = outcomes.index(self.current_outcome)\n prev_outcome = outcomes[-1] if cur_index == 0 \\\n else outcomes[cur_index-1]\n return prev_outcome\n\n def get_next_follow_up(self):\n print \"\\nfollow ups for outcome:\"\n print self.dataset.outcome_names_to_follow_ups[self.current_outcome]\n t_point = self.current_time_point\n if self.current_time_point >= max(self.dataset.outcome_names_to_follow_ups[self.current_outcome].keys()):\n t_point = 0\n else:\n # WARNING if we delete a time point things might get screwed up here\n # as we're actually using the MAX when we insert new follow ups\n # TODO change this to look for the next greatest time point rather than\n # assuming the current + 1 exists\n t_point += 1\n follow_up_name = self.get_follow_up_name_for_t_point(t_point)\n print \"\\nt_point; name: %s, %s\" % (t_point, follow_up_name)\n return (t_point, follow_up_name)\n \n def get_previous_follow_up(self):\n t_point = self.current_time_point\n if self.current_time_point <= min(self.dataset.outcome_names_to_follow_ups[self.current_outcome].keys()):\n t_point = max(self.dataset.outcome_names_to_follow_ups[self.current_outcome].keys())\n else:\n # WARNING if we delete a time point things might get screwed up here\n # as we're actually using the MAX when we insert new follow ups\n # TODO change this to look for the next greatest time point rather than\n # assuming the current - 1 exists\n t_point -= 1\n return (t_point, self.get_follow_up_name_for_t_point(t_point))\n \n def set_current_time_point(self, time_point):\n self.current_time_point = time_point\n self.emit(SIGNAL(\"followUpChanged()\"))\n self.reset()\n \n def set_current_follow_up(self, follow_up_name):\n t_point = self.dataset.outcome_names_to_follow_ups[self.current_outcome].get_key(follow_up_name)\n self.set_current_time_point(t_point)\n\n def get_current_follow_up_name(self):\n if len(self.dataset.outcome_names_to_follow_ups) > 0:\n try:\n return self.dataset.outcome_names_to_follow_ups[self.current_outcome][self.current_time_point]\n except:\n return None\n \n def get_follow_up_name_for_t_point(self, t_point):\n return self.dataset.outcome_names_to_follow_ups[self.current_outcome][t_point]\n \n def get_t_point_for_follow_up_name(self, follow_up):\n return self.dataset.outcome_names_to_follow_ups[self.current_outcome].get_key(follow_up)\n \n def get_current_groups(self):\n return self.current_txs\n \n def get_previous_groups(self):\n return self.previous_txs\n \n def next_groups(self):\n ''' Returns a tuple with the next two group names (we just iterate round-robin) '''\n if len(self.dataset.get_group_names()) == 0:\n return []\n \n ## notice that we only retrieve the group names that belong\n 
# to the current outcome/follow-up tuple\n group_names = self.dataset.get_group_names_for_outcome_fu(self.current_outcome, self.get_current_follow_up_name())\n \n self._next_group_indices(group_names)\n \n if not self.is_diag():\n # shuffle over groups\n while self.tx_index_a == self.tx_index_b:\n self._next_group_indices(group_names)\n else:\n self._next_group_index(group_names)\n \n next_txs = [group_names[self.tx_index_a], group_names[self.tx_index_b]]\n print \"new tx group indices a, b: %s, %s\" % (self.tx_index_a, self.tx_index_b)\n return next_txs\n \n\n def _next_group_indices(self, group_names):\n print \"\\ngroup names: %s\" % group_names\n if self.tx_index_b < len(group_names)-1:\n self.tx_index_b += 1\n else:\n # bump the a index\n if self.tx_index_a < len(group_names)-1:\n self.tx_index_a += 1\n else:\n self.tx_index_a = 0\n self.tx_index_b = 0\n\n\n def _next_group_index(self, group_names):\n # increments tx A; ignores B\n if self.tx_index_a < len(group_names)-1:\n self.tx_index_a += 1\n else:\n self.tx_index_a = 0\n \n def outcome_has_follow_up(self, outcome, follow_up):\n if outcome is None:\n print(\"Tried to reference None outcome\")\n return None\n outcome_d = self.dataset.outcome_names_to_follow_ups[outcome]\n\n return follow_up in outcome_d.keys()\n \n def outcome_fu_has_group(self, outcome, follow_up, group):\n ## we just pull the outcome from the first study; we tacitly\n # assume that all studies have the same outcomes/follow-ups.\n # \n outcome_d = self.dataset.studies[0].outcomes_to_follow_ups[outcome]\n\n ## we _assume_ that the follow_up is in this outcome!\n return group in outcome_d[follow_up].tx_groups.keys()\n \n def set_current_groups(self, group_names):\n self.previous_txs = self.current_txs\n self.current_txs = group_names\n self.tx_index_a = self.dataset.get_group_names().index(group_names[0])\n self.tx_index_b = self.dataset.get_group_names().index(group_names[1])\n print \"\\ncurrent tx group index a, b: %s, %s\" % (self.tx_index_a, self.tx_index_b)\n \n def get_group_names(self):\n return self.dataset.get_group_names()\n\n def sort_studies(self, col, reverse):\n if col == self.NAME:\n self.dataset.studies.sort(cmp = self.dataset.cmp_studies(compare_by=\"name\", reverse=reverse, mult=self.get_mult()), reverse=reverse)\n elif col == self.YEAR:\n self.dataset.studies.sort(cmp = self.dataset.cmp_studies(compare_by=\"year\", reverse=reverse, mult=self.get_mult()), reverse=reverse)\n elif col in self.RAW_DATA:\n #data_type = self.dataset.get_outcome_type(self.current_outcome)\n # need this to dig down to find right ma_unit and data we're looking for to compare against\n ma_unit_reference_info = {'outcome_name': self.current_outcome, \n 'follow_up': self.get_follow_up_name_for_t_point(self.current_time_point),\n 'current_groups': self.get_current_groups(),\n 'data_index': col - min(self.RAW_DATA)}\n self.dataset.studies.sort(cmp = self.dataset.cmp_studies(compare_by=\"raw_data\", \n reverse=reverse, directions_to_ma_unit=ma_unit_reference_info, mult=self.get_mult()), reverse=reverse)\n elif col in self.OUTCOMES:\n # need this to dig down to find right ma_unit and data we're looking for to compare against\n ma_unit_reference_info = {\n 'outcome_type': self.dataset.get_outcome_type(self.current_outcome),\n 'outcome_name': self.current_outcome, \n 'follow_up': self.get_follow_up_name_for_t_point(self.current_time_point),\n 'current_groups': self.get_current_groups(),\n 'current_effect': self.current_effect,\n 'group_str': self.get_cur_group_str(),\n 
'data_index': col - min(self.OUTCOMES)\n }\n self.dataset.studies.sort(cmp = self.dataset.cmp_studies(compare_by=\"outcomes\", \n reverse=reverse, directions_to_ma_unit=ma_unit_reference_info, mult=self.get_mult()),\n reverse=reverse)\n \n \n # covariates -- note that we assume anything to the right of the outcomes\n # is a covariate\n elif col > self.OUTCOMES[-1]:\n cov = self.get_cov(col)\n self.dataset.studies.sort(cmp = self.dataset.cmp_studies(\\\n compare_by=cov.name, reverse=reverse, mult=self.get_mult()), reverse=reverse)\n\n self.reset()\n\n def order_studies(self, ids):\n ''' Shuffles studies vector to the order specified by ids'''\n ordered_studies = []\n for an_id in ids:\n for study in self.dataset.studies:\n if study.id == an_id:\n ordered_studies.append(study)\n break\n self.dataset.studies = ordered_studies\n self.reset()\n\n def set_current_outcome(self, outcome_name):\n self.current_outcome = outcome_name\n self.update_column_indices()\n self.update_cur_tx_effect()\n self.emit(SIGNAL(\"outcomeChanged()\"))\n self.reset()\n \n def update_cur_tx_effect(self):\n outcome_type = self.dataset.get_outcome_type(self.current_outcome)\n if outcome_type == BINARY:\n self.current_effect = \"OR\"\n elif outcome_type == CONTINUOUS:\n self.current_effect = \"MD\"\n else:\n # diagnostic -- what should we do here? we show\n # sensitivity/specificity; I don't think there's a\n # notion of a `current effect'...\n self.current_effect = None\n \n def max_study_id(self):\n return self.dataset.max_study_id()\n\n def num_data_cols_for_current_unit(self):\n '''\n Returns the number of columns needed to display the raw data\n given the current data type (binary, etc.)\n \n Note again that outcome names are necessarily unique!\n '''\n data_type = self.dataset.get_outcome_type(self.current_outcome)\n sub_type = self.dataset.get_outcome_subtype(self.current_outcome)\n if data_type is None:\n return 0\n elif data_type in [BINARY, DIAGNOSTIC, OTHER]:\n return 4\n elif data_type == CONTINUOUS:\n if sub_type == \"generic_effect\":\n return 0 # no raw data for generic effect\n else:\n return 6\n \n\n def get_current_outcome_type(self, get_str=True):\n ''' Returns the type of the currently displayed (or 'active') outcome (e.g., binary). 
'''\n return self.dataset.get_outcome_type(self.current_outcome, get_string=get_str)\n \n def get_outcome_type(self, outcome, get_str=True):\n return self.dataset.get_outcome_type(outcome, get_string=get_str)\n \n def get_current_outcome_subtype(self):\n return self.dataset.get_outcome_subtype(self.current_outcome)\n\n def _set_standard_cols(self, d):\n ''' these are immutable '''\n # column indices\n d[\"NAME\"] = self.NAME\n d[\"YEAR\"] = self.YEAR\n d[\"RAW_DATA\"] = self.RAW_DATA\n d[\"OUTCOMES\"] = self.OUTCOMES\n d[\"HEADERS\"] = self.headers\n return d\n\n def make_reasonable_stateful_dict(self, data_model):\n d = {}\n d = self._set_standard_cols(d)\n\n # now take guesses/pick randomly for the remaining\n # fields\n d[\"current_outcome\"] = data_model.get_outcome_names()[0] \n d[\"current_time_point\"] = data_model.get_follow_up_names()[0]\n\n\n # just pick a reasonable current effect,\n # given the outcome data type\n data_type = data_model.get_outcome_type(d[\"current_outcome\"])\n \n print(\"data_type: \", data_type)\n\n all_txs = data_model.get_group_names()\n\n if data_type == DIAGNOSTIC:\n d[\"current_txs\"] = [all_txs[0]]\n else:\n d[\"current_txs\"] = [all_txs[0], all_txs[1]]\n\n effect = None # this is ignored for diagnostic\n if data_type == BINARY:\n effect = \"OR\"\n elif data_type == CONTINUOUS:\n effect = \"SMD\"\n # make sure you call change_metric_if_appropriate\n # after setting this as the state_dict\n d[\"current_effect\"] = effect\n d[\"study_auto_added\"] = False # hmm ?\n \n d[\"conf_level\"] = DEFAULT_CONF_LEVEL\n \n return d\n\n\n def get_stateful_dict(self):\n '''\n This captures the state of the model view; things like the current outcome\n and column indices that are on the QT side of the data table model.\n '''\n d = {}\n d = self._set_standard_cols(d)\n\n # currently displayed outcome, etc\n d[\"current_outcome\"] = self.current_outcome\n d[\"current_time_point\"] = self.current_time_point\n d[\"current_txs\"] = self.current_txs\n d[\"current_effect\"] = self.current_effect\n d[\"study_auto_added\"] = self.study_auto_added\n d[\"conf_level\"] = self.conf_level\n \n return d\n\n def is_diag(self):\n ''' Convenience method -- just returns if the ma_dataset is a diagnostic dataset or not '''\n return self.dataset.is_diag\n \n def set_state(self, state_dict):\n for key, val in state_dict.items():\n if key == \"conf_level\":\n self.set_conf_level(val)\n else:\n exec(\"self.%s = val\" % key)\n \n if \"conf_level\" not in state_dict.keys():\n self.set_conf_level(DEFAULT_CONF_LEVEL)\n \n self.reset()\n\n def raw_data_is_complete_for_study(self, study_index, first_arm_only=False): \n raw_data = self._get_raw_data_according_to_arms(study_index, first_arm_only)\n \n raw_data_is_complete = not \"\" in raw_data and not None in raw_data\n return raw_data_is_complete\n \n def _raw_data_is_not_empty_for_study(self, study_index, first_arm_only=False):\n raw_data = self._get_raw_data_according_to_arms(study_index, first_arm_only)\n \n empty = True\n for x in raw_data:\n if x not in EMPTY_VALS:\n empty = False\n\n return not empty \n\n def _get_raw_data_according_to_arms(self, study_index, first_arm_only = False):\n if self.current_outcome is None or self.current_time_point is None:\n return False\n\n raw_data = self.get_cur_raw_data_for_study(study_index)\n data_type = self.get_current_outcome_type(get_str=False)\n # if first_arm_only is true, we are only concerned with whether\n # or not there is sufficient raw data for the first arm of the study\n \n if 
first_arm_only:\n if data_type == BINARY:\n raw_data = raw_data[:2]\n elif data_type == CONTINUOUS:\n raw_data = raw_data[:3]\n return raw_data\n\n def data_for_only_one_arm(self):\n '''\n really this should read 'data for one *and only one* arm.\n '''\n data_for_arm_one, data_for_arm_two = False, False\n\n data_type = self.get_current_outcome_type(get_str=False)\n per_group_raw_data_size = 2 if data_type == BINARY else 3\n\n for study_index in range(len(self.dataset.studies)):\n cur_raw_data = self.get_cur_raw_data_for_study(study_index)\n \n if len([x for x in cur_raw_data[:per_group_raw_data_size] if x is not None and x!='']) > 0:\n data_for_arm_one = True\n if len([x for x in cur_raw_data[per_group_raw_data_size:] if x is not None and x!='']) > 0:\n data_for_arm_two = True\n\n return (data_for_arm_one and not data_for_arm_two) or\\\n (data_for_arm_two and not data_for_arm_one)\n\n @DebugHelper\n def try_to_update_outcomes(self):\n for study_index in range(len(self.dataset.studies)):\n self.update_outcome_if_possible(study_index)\n\n \n def blank_all_studies(self, include_them):\n # note that we do *not* change the status of the\n # last study, because this is assumed to be an\n # auto-added (blank) study. formerly, when\n # 'include all' was used, this was being flipped\n # to true for the empty studies, causing issues.\n # this is a fix for issue #178\n for study in self.dataset.studies[:-1]:\n study.include=include_them\n \n ###\n # syntactic high-fructose corn syrup\n def include_all_studies(self):\n self.blank_all_studies(True)\n \n def exclude_all_studies(self):\n self.blank_all_studies(False)\n\n def all_studies_are_included(self):\n return all([study.include for study in self.dataset.studies])\n \n def all_studies_are_excluded(self):\n return all([not study.include for study in self.dataset.studies])\n\n\n def update_outcome_if_possible(self, study_index):\n '''\n Rules:\n Checks the parametric study to ascertain if enough raw data has been\n entered to compute the outcome. If so, the outcome is computed and\n displayed.\n \n If the raw data is not empty, the outcome should be blanked out.\n If the raw data is empty, the outcome should not be effected\n '''\n est_and_ci_d = None\n # to index into the effect belonging to the currently displayed groups\n group_str = self.get_cur_group_str() \n data_type = self.get_current_outcome_type(get_str=False) \n one_arm_effect = self.current_effect in BINARY_ONE_ARM_METRICS + CONTINUOUS_ONE_ARM_METRICS \n ma_unit = self.get_current_ma_unit_for_study(study_index)\n\n ####\n # previously we were always setting this to false here,\n # but below we check only for raw data. 
in fact,\n # we only want to force an exclude if there is no\n # raw data *and* no manually entered point estimate/CI\n if data_type == DIAGNOSTIC or not self.study_has_point_est(study_index):\n self.dataset.studies[study_index].include = False\n\n # we try to compute outcomes if either all raw data is there, or, if we have a one-arm\n # metric then if sufficient raw data exists to compute this\n if self.raw_data_is_complete_for_study(study_index) or \\\n (one_arm_effect and self.raw_data_is_complete_for_study(study_index, first_arm_only=True)):\n \n if not self.dataset.studies[study_index].manually_excluded:\n # include the study -- note that if the user excluded the study, then\n # edited the raw data, this will re-include it automatically\n self.dataset.studies[study_index].include = True\n\n if data_type == BINARY:\n e1, n1, e2, n2 = self.get_cur_raw_data_for_study(study_index)\n if self.current_effect in BINARY_TWO_ARM_METRICS:\n est_and_ci_d = meta_py_r.effect_for_study(e1, n1, e2, n2,\n metric=self.current_effect,\n conf_level=self.conf_level)\n else:\n # binary, one-arm\n est_and_ci_d = meta_py_r.effect_for_study(e1, n1, \n two_arm=False,\n metric=self.current_effect,\n conf_level=self.conf_level)\n elif data_type == CONTINUOUS:\n n1, m1, sd1, n2, m2, sd2 = self.get_cur_raw_data_for_study(study_index)\n if self.current_effect in CONTINUOUS_TWO_ARM_METRICS:\n est_and_ci_d = meta_py_r.continuous_effect_for_study(n1, m1, sd1,\n n2=n2, m2=m2, sd2=sd2, metric=self.current_effect, conf_level=self.conf_level)\n else:\n # continuous, one-arm metric\n est_and_ci_d = meta_py_r.continuous_effect_for_study(n1, m1, sd1,\n two_arm=False, metric=self.current_effect, conf_level=self.conf_level)\n \n elif data_type == DIAGNOSTIC: \n # diagnostic data\n tp, fn, fp, tn = self.get_cur_raw_data_for_study(study_index)\n\n # sensitivity and specificity\n ests_and_cis = meta_py_r.diagnostic_effects_for_study(\n tp, fn, fp, tn,\n metrics=DIAGNOSTIC_METRICS,\n conf_level=self.conf_level)\n \n ###\n # now we're going to set the effect estimate/CI on the MA object.\n \n for metric in DIAGNOSTIC_METRICS:\n est, lower, upper = ests_and_cis[metric][\"calc_scale\"]\n ma_unit.set_effect_and_ci(metric, group_str, est, lower, upper, mult=self.mult)\n conv_to_disp_scale = self._get_conv_to_display_scale(data_type, effect=metric)\n ma_unit.calculate_display_effect_and_ci(\n metric, group_str, conv_to_disp_scale,\n conf_level=self.get_global_conf_level(),\n mult=self.mult)\n \n ####\n # if we're dealing with continuous or binary data, here\n # is where we update the point estimates -- we do this \n # above in the case of diagnostic data, which needs to be\n # handled differently, because we're updating two\n # outcomes, in that case\n if data_type != DIAGNOSTIC:\n est, lower, upper = None, None, None\n if est_and_ci_d is not None:\n est, lower, upper = est_and_ci_d[\"calc_scale\"] # calculation scale\n # now set the effect size & CIs\n # note that we keep two versions around; a version on the 'calculation' scale\n # (e.g., log) and a version on the continuous/display scale to present to the\n # user via the UI.\n ma_unit.set_effect_and_ci(self.current_effect, group_str, est, lower, upper, mult=self.mult)\n conv_to_disp_scale = self._get_conv_to_display_scale(data_type, effect=self.current_effect, n1=n1)\n ma_unit.calculate_display_effect_and_ci(\n self.current_effect, group_str,\n conv_to_disp_scale,\n conf_level=self.get_global_conf_level(),\n mult=self.mult)\n elif self._raw_data_is_not_empty_for_study(study_index) 
or (one_arm_effect and self._raw_data_is_not_empty_for_study(study_index, first_arm_only=True)):\n            if data_type in [BINARY, CONTINUOUS]: # raw data is not blank but not full so clear outcome\n                est, lower, upper, se = None, None, None, None\n                ma_unit.set_effect_and_ci(self.current_effect, group_str, est, lower, upper, mult=self.mult)\n                ma_unit.set_SE(self.current_effect, group_str, se)\n                conv_to_disp_scale = self._get_conv_to_display_scale(data_type, effect=self.current_effect)\n                ma_unit.calculate_display_effect_and_ci(\n                    self.current_effect, group_str,\n                    conv_to_disp_scale,\n                    conf_level=self.get_global_conf_level(),\n                    mult=self.mult)\n            else: # raw data is all blank, do nothing\n                pass\n        \n    def get_cur_raw_data(self, only_if_included=True, only_these_studies=None):\n        raw_data = []\n        \n        for study_index in range(len(self.dataset.studies)):\n            if not only_if_included or self.dataset.studies[study_index].include:\n                if only_these_studies is None or self.dataset.studies[study_index].id in only_these_studies:\n                    raw_data.append(self.get_cur_raw_data_for_study(study_index))\n\n        return raw_data\n    \n    def included_studies_have_raw_data(self):\n        ''' \n        True iff all _included_ studies have all raw data (e.g., 2x2 for binary) for the currently\n        selected outcome and tx groups.\n\n        Note that if the current metric is a *one-arm* metric, we only check the first\n        arm; i.e., a study is considered to have raw data in this case if the active arm\n        has data.\n        '''\n\n        one_arm_data = self.current_effect in ONE_ARM_METRICS\n\n        # the -1 is again accounting for the last (empty) appended study\n        for study_index in range(len(self.dataset.studies)-1):\n            if self.dataset.studies[study_index].include:\n                if not self.raw_data_is_complete_for_study(study_index,\\\n                                first_arm_only=one_arm_data):\n                    return False\n        return True\n\n\n    def study_has_point_est(self, study_index, effect=None):\n        group_str = self.get_cur_group_str()\n        effect = effect or self.current_effect\n        cur_ma_unit = self.get_current_ma_unit_for_study(study_index)\n        \n        if None in cur_ma_unit.get_effect_and_se(effect, group_str, self.mult):\n            print \"study %s does not have a point estimate\" % study_index\n            return False\n        \n        # ok -- has all point estimates\n        return True\n    \n    def cur_point_est_and_SE_for_study(self, study_index, effect=None):\n        group_str = self.get_cur_group_str()\n        cur_ma_unit = self.get_current_ma_unit_for_study(study_index)\n        effect = effect or self.current_effect\n        \n        est = cur_ma_unit.get_estimate(effect, group_str)\n        se = cur_ma_unit.get_se(effect, group_str, self.mult)\n        return (est, se)\n    \n    def get_cur_ests_and_SEs(self, only_if_included=True, only_these_studies=None, effect=None):\n        ests, SEs = [], []\n        effect = effect or self.current_effect\n        for study_index in xrange(len(self.dataset.studies)):\n            if only_these_studies is None or self.dataset.studies[study_index].id in only_these_studies:\n                # issue #171 -- blank studies are *wrongly* set to be included after paste\n                if not only_if_included or self.dataset.studies[study_index].include:\n                    est, SE = self.cur_point_est_and_SE_for_study(study_index, effect=effect)\n                    ests.append(est)\n                    SEs.append(SE)\n        return (ests, SEs)\n    \n    def included_studies_have_point_estimates(self, effect=None):\n        ''' \n        True iff all included studies have all point estimates (and CIs) for\n        the 'effect' outcome and currently displayed tx groups. 
(If effect is None,\n this sets the 'effect' to the currently selected effect).\n '''\n for study_index in range(len(self.dataset.studies)-1):\n if self.dataset.studies[study_index].include:\n if not self.study_has_point_est(study_index, effect=effect):\n return False\n return True \n \n def get_studies(self, only_if_included=True):\n included_studies = []\n\n for study in self.dataset.studies:\n if not only_if_included or study.include:\n included_studies.append(study)\n # we lop off the last entry because it is always a blank line/study\n # 11/18/11 -- arg! previously we were explicitly lopping off\n # the last study (presumed to be blank). this is not necessary! \n # we already check if it's included...\n return list(included_studies) \n\n def get_cur_raw_data_for_study(self, study_index):\n return self.get_current_ma_unit_for_study(study_index).get_raw_data_for_groups(self.current_txs)\n\n def set_current_ma_unit_for_study(self, study_index, new_ma_unit):\n # note that we just assume this exists.\n self.dataset.studies[study_index].outcomes_to_follow_ups[self.current_outcome][self.get_current_follow_up_name()]=new_ma_unit\n \n def get_current_ma_unit_for_study(self, study_index):\n '''\n Returns the MetaAnalytic unit for the study @ study_index. If no such Unit exists,\n it will be added. Thus when a new study is added to a dataset, there is no need\n to initially populate this study with empty MetaAnalytic units reflecting the known\n outcomes, time points & tx groups, as they will be added 'on-demand' here.\n '''\n \n return self.get_ma_unit(study_index=study_index,\n outcome=self.current_outcome,\n follow_up=self.get_current_follow_up_name(),\n tx_groups=self.current_txs)\n\n\n def get_ma_unit(self, study=None, study_index=None, outcome=None, follow_up=None, tx_groups=None):\n '''\n Returns the MetaAnalytic unit for the study @ study_index. If no such Unit exists,\n it will be added. Thus when a new study is added to a dataset, there is no need\n to initially populate this study with empty MetaAnalytic units reflecting the known\n outcomes, time points & tx groups, as they will be added 'on-demand' here.\n \n outcome and follow_up are the names, not ids or instances, of these objects\n \n '''\n \n if None not in [study, study_index]:\n if study != self.dataset.studies[study_index]:\n raise ValueError(\"study and study index don't match\")\n \n if study is None: # you can specify a study OR a study index\n study = self.dataset.studies[study_index]\n \n # first check to see that the current outcome is contained in this study\n if not outcome in study.outcomes_to_follow_ups:\n ###\n # Issue 7 (RESOLVED) http://github.com/bwallace/OpenMeta-analyst-/issues/#issue/7\n study.add_outcome(self.dataset.get_outcome_obj(outcome),\n group_names=self.dataset.get_group_names())\n \n # we must also make sure the time point exists. 
note that we use the *name* rather than the \n # index of the current time/follow up\n if not self.get_current_follow_up_name() in study.outcomes_to_follow_ups[outcome]:\n study.add_outcome_at_follow_up(self.dataset.get_outcome_obj(outcome),\n follow_up)\n \n \n # finally, make sure the studies contain the currently selected tx groups; if not, add them\n if tx_groups is not None:\n ma_unit = study.outcomes_to_follow_ups[outcome][follow_up]\n for tx_group in tx_groups:\n if not tx_group in ma_unit.get_group_names():\n ma_unit.add_group(tx_group)\n \n return ma_unit\n \n def max_raw_data_cols_for_current_unit(self):\n '''\n Returns the length of the biggest raw data list for the parametric ma_unit. e.g.,\n if a two group, binary outcome is the current ma_unit, then the studies should\n raw data vectors that contain, at most, 4 elements.\n '''\n return \\\n max([len(\\\n study.outcomes_to_follow_ups[self.current_outcome][self.current_time_point].get_raw_data_for_groups(self.current_txs)\\\n ) for study in self.dataset.studies if self.current_outcome in study.outcomes_to_follow_ups])\n\n def recalculate_display_scale(self):\n effect = self.current_effect\n group_str = self.get_cur_group_str()\n current_data_type = self.dataset.get_outcome_type(self.current_outcome)\n \n ma_units = []\n # Gather ma_units for spreadsheet\n for study_index in range(len(self.dataset.studies)-1): #-1 is because last study is always blank\n ma_units.append(self.get_current_ma_unit_for_study(study_index))\n \n binary_display_scale = lambda x: meta_py_r.binary_convert_scale(x, self.current_effect, convert_to=\"display.scale\")\n continuous_display_scale = lambda x: meta_py_r.continuous_convert_scale(x, self.current_effect, convert_to=\"display.scale\")\n \n def get_diagnostic_display_scale(m_str):\n return lambda x: meta_py_r.diagnostic_convert_scale(x, m_str, convert_to=\"display.scale\") \n \n for index, x in enumerate(ma_units):\n print(\"Recalculating display scale for ma_unit %d\" % index)\n\n if current_data_type in [BINARY,CONTINUOUS]:\n if current_data_type == BINARY:\n convert_to_display_scale = binary_display_scale\n elif current_data_type == CONTINUOUS:\n convert_to_display_scale = continuous_display_scale\n x.calculate_display_effect_and_ci(effect, group_str,\n convert_to_display_scale,\n conf_level=self.get_global_conf_level(), mult=self.mult,\n check_if_necessary=True)\n elif current_data_type == DIAGNOSTIC:\n for m_str in [\"Sens\",\"Spec\"]:\n x.calculate_display_effect_and_ci(m_str, group_str,\n convert_to_display_scale=get_diagnostic_display_scale(m_str),\n conf_level=self.get_global_conf_level(), mult=self.mult,\n check_if_necessary=True)\n print(\"Finished calculating display effect and cis\")\n\n def _get_conv_to_display_scale(self, data_type, effect, n1=None):\n ''' Returns appropriate conv_to_display_scale function '''\n \n if None in [data_type, effect]:\n print(\"_get_conv_to_display_scale got None for either data_type, or effect\")\n\n if data_type == BINARY:\n conv_to_disp_scale = lambda x: meta_py_r.binary_convert_scale(x, effect, convert_to=\"display.scale\", n1=n1)\n elif data_type == CONTINUOUS:\n conv_to_disp_scale = lambda x: meta_py_r.continuous_convert_scale(x, effect, convert_to=\"display.scale\")\n elif data_type == DIAGNOSTIC:\n conv_to_disp_scale = lambda x: meta_py_r.diagnostic_convert_scale(x, effect, convert_to=\"display.scale\")\n else:\n raise Exception(\"_get_conv_to_display_scale: data type not recognized!\")\n \n return conv_to_disp_scale\n \n def 
_get_calc_scale_value(self, display_scale_val=None, data_type=None, effect=None, n1=None):\n ''' Gets the calc-scale value of the given display_scale value'''\n \n if None in [display_scale_val, data_type, effect]:\n print(\"_get_calc_scale_value got None for either display_scale_val, data_type, or effect\")\n \n calc_scale_val = None\n if data_type == BINARY:\n calc_scale_val = meta_py_r.binary_convert_scale(display_scale_val, effect, convert_to=\"calc.scale\", n1=n1)\n elif data_type == CONTINUOUS:\n calc_scale_val = meta_py_r.continuous_convert_scale(display_scale_val, effect, convert_to=\"calc.scale\")\n elif data_type == DIAGNOSTIC:\n calc_scale_val = meta_py_r.diagnostic_convert_scale(display_scale_val, effect, convert_to=\"calc.scale\") \n else:\n raise Exception(\"_get_calc_scale_value: data type not recognized!\")\n \n return calc_scale_val\n\n\n def set_conf_level(self, conf_lev):\n ''' sets multiplier as well (~1.96 for 95% conf level) '''\n \n invalid_conf_lev_msg = \"Confidence level needs to be a number between 0 and 100\"\n if conf_lev is None:\n raise ValueError(invalid_conf_lev_msg)\n elif not (0 < conf_lev < 100):\n raise ValueError(invalid_conf_lev_msg)\n \n self.conf_level = float(conf_lev)\n print(\"Set confidence level to: %f\" % conf_lev)\n \n self.mult = meta_py_r.get_mult_from_r(conf_lev)\n print(\"mult is now: %s\" % str(self.mult))\n \n # set in R as well\n r_str = \"set.global.conf.level(\"+str(float(conf_lev))+\")\"\n new_cl_in_R = meta_py_r.ro.r(r_str)[0]\n print(\"Set confidence level in R to: %f\" % new_cl_in_R)\n\n self.emit(SIGNAL(\"conf_level_changed()\"))\n \n return conf_lev\n \n def get_global_conf_level(self):\n return self.conf_level\n \n def get_mult(self):\n print(\"mult is %s\" % str(self.mult))\n return self.mult\n" }, { "alpha_fraction": 0.5718892216682434, "alphanum_fraction": 0.5777318477630615, "avg_line_length": 48.100135803222656, "blob_id": "416bd7ad64d09f53b3c26c79f355b07c068f6760", "content_id": "715fae3d27d9f73633ba975d1ad0ca0dd5c5cb06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 36287, "license_type": "no_license", "max_line_length": 226, "num_lines": 739, "path": "/src/R/openmetar/R/binary_methods.r", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "\n####################################\n# OpenMeta[Analyst] #\n# ---- #\n# binary_methods.r # \n# Facade module; wraps methods #\n# that perform analysis on binary #\n# data in a coherent interface. 
# \n####################################\n\nlibrary(metafor)\n\nbinary.logit.metrics <- c(\"PLO\")\nbinary.log.metrics <- c(\"OR\", \"RR\", \"PLN\")\nbinary.arcsine.metrics <- c(\"PAS\")\n# The two-arm metric arcsine risk difference (AS) is not included in binary.arcsine.metrics\n# so that display scale will be same as calculation scale.\nbinary.freeman_tukey.metrics <- c(\"PFT\")\nbinary.two.arm.metrics <- c(\"OR\", \"RD\", \"RR\", \"AS\", \"YUQ\", \"YUY\")\nbinary.one.arm.metrics <- c(\"PR\", \"PLN\", \"PLO\", \"PAS\", \"PFT\")\n\n\ncompute.for.one.bin.study <- function(binary.data, params){\n res <- escalc(params$measure, ai=binary.data@g1O1, bi=binary.data@g1O2, \n ci=binary.data@g2O1, di=binary.data@g2O2,\n add=params$adjust, to=params$to)\n res \n}\n\ncompute.bin.point.estimates <- function(binary.data, params) {\n# Computes point estimates based on raw data and adds them to binary.data.\n res <- compute.for.one.bin.study(binary.data, params)\n binary.data@y <- res$yi\n binary.data@SE <- sqrt(res$vi)\n binary.data\n}\n\nbinary.transform.f <- function(metric.str){\n display.scale <- function(x, ...){\n \n extra.args <- list(...)\n \n if (metric.str %in% binary.log.metrics){\n exp(x)\n } else if (metric.str %in% binary.logit.metrics){\n invlogit(x)\n } else if (metric.str %in% binary.arcsine.metrics){\n invarcsine.sqrt(x)\n } else if (metric.str %in% binary.freeman_tukey.metrics){\n ni <- extra.args[['ni']]\n if (length(x)==1) {\n # If x has length 1, use harmonic mean inverse transform, which takes the harmonic mean of n as second arg. \n # If n also has length 1, this is the same as trans.ipft(x,n).\n transf.ipft.hm(x, targs=list(ni=ni))\n } else {\n transf.ipft(x, ni)\n }\n } else { \n # identity function\n x\n }\n } \n\n \n calc.scale <- function(x, ...){\n \n extra.args <- list(...)\n if (metric.str %in% binary.log.metrics){\n log(x)\n } else if (metric.str %in% binary.logit.metrics){\n logit(x) \n } else if (metric.str %in% binary.arcsine.metrics){\n arcsine.sqrt(x) \n } else if (metric.str %in% binary.freeman_tukey.metrics){\n ni <- extra.args[['ni']]\n if (length(x)==1) {\n transf.pft(x, ni)\n }\n } else {\n # identity function\n x\n }\n }\n\n list(display.scale = display.scale, calc.scale = calc.scale)\n}\n\nget.res.for.one.binary.study <- function(binary.data, params) {\n # this method can be called when there is only one study to \n # get the point estimate and lower/upper bounds.\n y<-NULL\n se<-NULL\n if (is.na(binary.data@y)){\n res <- compute.for.one.bin.study(binary.data, params) \n y <- res$yi[1]\n se <- sqrt(res$vi[1])\n }\n else{\n y <- binary.data@y[1]\n se <- binary.data@SE[1]\n }\n # note: conf.level is given as, e.g., 95, rather than .95.\n alpha <- 1.0-(params$conf.level/100.0)\n mult <- abs(qnorm(alpha/2.0))\n ub <- y + mult*se\n lb <- y - mult*se\n # we make lists to comply with the get.overall method\n res <- list(\"b\"=c(y), \"ci.lb\"=lb, \"ci.ub\"=ub, \"se\"=se) \n res\n}\n\n#convert.res.conf.level(data) {\n## ;\n##}\n\n\n\ncreate.binary.data.array <- function(binary.data, params, res){\n # Extracts data from binary.data and puts it into an array for the the first summary display table.\n tx1.name <- \"tx A\"\n tx2.name <- \"tx B\"\n # TODO: these should be taken from the corresponding column labels in the GUI and passed in via params.\n digits.str <- paste(\"%.\", params$digits, \"f\", sep=\"\")\n effect.size.name <- pretty.metric.name(as.character(params$measure))\n # Caculate confidence intervals\n study.ci.bounds <- calc.ci.bounds(binary.data, params)\n 
y.disp <- binary.transform.f(params$measure)$display.scale(binary.data@y)\n lb.disp <- binary.transform.f(params$measure)$display.scale(study.ci.bounds$lb)\n ub.disp <- binary.transform.f(params$measure)$display.scale(study.ci.bounds$ub)\n y <- sprintf(digits.str, y.disp)\n LL <- sprintf(digits.str, lb.disp)\n UL <- sprintf(digits.str, ub.disp)\n weights <- res$study.weights\n weights <- sprintf(digits.str, weights)\n weights <- format(weights, justify=\"right\")\n # Extract the data from binary.data and round\n event.txA <- format(binary.data@g1O1, justify=\"right\")\n subject.txA <- format(binary.data@g1O1 + binary.data@g1O2, justify=\"right\")\n \n if (params$measure %in% binary.two.arm.metrics) {\n event.txB <- format(binary.data@g2O1, justify=\"right\")\n subject.txB <- format(binary.data@g2O1 + binary.data@g2O2, justify=\"right\") \n raw.data <- array(c(\"Study\", paste([email protected], \" \", binary.data@years, sep=\"\"), \n paste(tx1.name, \" Events\", sep=\"\"), event.txA, \n paste(tx1.name, \" Subjects\", sep=\"\"), subject.txA, \n paste(tx2.name, \" Events\", sep=\"\"), event.txB, \n paste(tx2.name, \" Subjects\", sep=\"\"), subject.txB, \n effect.size.name, y, \"Lower\", LL, \"Upper\", UL, \"Weight\", weights), \n dim=c(length([email protected]) + 1, 9))\n class(raw.data) <- \"summary.data\" \n } else if (params$measure %in% binary.one.arm.metrics) {\n raw.data <- array(c(\"Study\", paste([email protected], \" \", binary.data@years, sep=\"\"), \n paste(tx1.name, \" Events\", sep=\"\"), event.txA, \n paste(tx1.name, \" Subjects\", sep=\"\"), subject.txA, \n effect.size.name, y, \"Lower\", LL, \"Upper\", UL, \"Weight\", weights),\n dim=c(length([email protected]) + 1, 7))\n }\n return(raw.data)\n}\n\nwrite.bin.study.data.to.file <- function(binary.data, params, res, data.outpath) {\n # create data frame and write to csv\n effect.size.name <- pretty.metric.name(as.character(params$measure))\n y.disp <- binary.transform.f(params$measure)$display.scale(binary.data@y)\n study.ci.bounds <- calc.ci.bounds(binary.data, params)\n if (params$measure %in% binary.two.arm.metrics) {\n study.data.df <- data.frame(\"study.names\"=paste([email protected], \" \", binary.data@years, sep=\"\"),\n \"txA.events\" = binary.data@g1O1,\n \"txA.subjects\" = binary.data@g1O1 + binary.data@g1O2,\n \"txB.events\" = binary.data@g2O1,\n \"txB.subjects\" = binary.data@g2O1 + binary.data@g2O2,\n \"Effect.size\" = binary.transform.f(params$measure)$display.scale(binary.data@y),\n \"Lower.bound\" = binary.transform.f(params$measure)$display.scale(study.ci.bounds$lb),\n \"Upper.bound\" = binary.transform.f(params$measure)$display.scale(study.ci.bounds$ub),\n \"Weight\" = res$study.weights)\n } else if(params$measure %in% binary.one.arm.metrics) {\n study.data.df <- data.frame(\"study.names\"=paste([email protected], \" \", binary.data@years, sep=\"\"),\n \"txA.events\" = binary.data@g1O1,\n \"txA.subjects\" = binary.data@g1O1 + binary.data@g1O2,\n \"Effect.size\" = binary.transform.f(params$measure)$display.scale(binary.data@y),\n \"Lower.bound\" = binary.transform.f(params$measure)$display.scale(study.ci.bounds$lb),\n \"Upper.bound\" = binary.transform.f(params$measure)$display.scale(study.ci.bounds$ub),\n \"Weight\" = res$study.weights)\n }\n # Rename effect size column\n names(study.data.df)[names(study.data.df)==\"Effect.size\"] <- effect.size.name\n write.csv(study.data.df, file=data.outpath, append=FALSE, row.names=FALSE)\n}\n\n###################################################\n# binary fixed effects -- 
inverse variance #\n###################################################\nbinary.fixed.inv.var <- function(binary.data, params){\n # assert that the argument is the correct type\n if (!(\"BinaryData\" %in% class(binary.data)))\n stop(\"Binary data expected.\")\n \n results <- NULL\n input.params <- params\n \n if (length(binary.data@g1O1) == 1 || length(binary.data@y) == 1){\n res <- get.res.for.one.binary.study(binary.data, params)\n # Package res for use by overall method.\n results <- list(\"Summary\"=res,\n \"res\"=res)\n } else {\n # call out to the metafor package\n res<-rma.uni(yi=binary.data@y, sei=binary.data@SE, [email protected],\n level=params$conf.level, digits=params$digits, method=\"FE\", add=c(params$adjust,params$adjust),\n to=c(as.character(params$to), as.character(params$to)))\n pure.res <- res\n # Create forest plot and list to display summary of results\n metric.name <- pretty.metric.name(as.character(params$measure))\n model.title <- paste(\"Binary Fixed-Effect Model - Inverse Variance\\n\\nMetric: \", metric.name, sep=\"\")\n # Create results display tables\n summary.disp <- create.summary.disp(binary.data, params, res, model.title)\n forest.path <- paste(params$fp_outpath, sep=\"\")\n plot.data <- create.plot.data.binary(binary.data, params, res)\n changed.params <- plot.data$changed.params\n # list of changed params values\n\t\t\n\t\t\n\t\tforest.plot.params.path <- \"\"\n\t\tif (is.null(params$supress.output) || !params$supress.output) {\n params.changed.in.forest.plot <- forest.plot(forest.data=plot.data, outpath=forest.path)\n changed.params <- c(changed.params, params.changed.in.forest.plot)\n params[names(changed.params)] <- changed.params\n # update params values\n forest.plot.params.path <- save.data(binary.data, res, params, plot.data)\n\t\t}\n \n # Now we package the results in a dictionary (technically, a named \n # vector). In particular, there are two fields that must be returned; \n # a dictionary of images (mapping titles to image paths) and a list of texts\n # (mapping titles to pretty-printed text). In this case we have only one \n # of each. 
\n # \n \n plot.params.paths <- c(\"Forest Plot\"=forest.plot.params.path)\n images <- c(\"Forest Plot\"=forest.path)\n plot.names <- c(\"forest plot\"=\"forest_plot\")\n pure.res$weights <- weights(res)\n results <- list(\"input_data\"=binary.data,\n \"input_params\"=input.params,\n \"images\"=images,\n \"Summary\"=capture.output.and.collapse(summary.disp),\n \"plot_names\"=plot.names, \n \"plot_params_paths\"=plot.params.paths,\n \"res\"=pure.res,\n \"res.info\"=binary.fixed.inv.var.value.info(),\n \"weights\"=weights(res))\n }\n \n #references <- \"this is a placeholder for binary fixed effect inv var reference\"\n #references <- \"\"\n #results[[\"References\"]] <- references\n results\n}\n\nbinary.fixed.inv.var.value.info <- function() {\n rma.uni.value.info()\n}\n\nbinary.fixed.inv.var.is.feasible.for.funnel <- function() {\n TRUE\n}\n \nbinary.fixed.inv.var.parameters <- function(){\n # parameters\n apply_adjustment_to = c(\"only0\", \"all\")\n \n params <- list(\"conf.level\"=\"float\",\n \"digits\"=\"int\", \n \"adjust\"=\"float\",\n \"to\"=apply_adjustment_to)\n \n # default values\n defaults <- list(\"conf.level\"=95, \"digits\"=3, \"adjust\"=.5, \"to\"=\"only0\")\n \n var_order = c(\"conf.level\", \"digits\", \"adjust\", \"to\")\n \n parameters <- list(\"parameters\"=params, \"defaults\"=defaults, \"var_order\"=var_order)\n}\n\nbinary.fixed.inv.var.pretty.names <- function() {\n pretty.names <- list(\"pretty.name\"=\"Binary Fixed-Effect Inverse Variance\", \n \"description\" = \"Performs fixed-effect meta-analysis with inverse variance weighting.\",\n \"conf.level\"=list(\"pretty.name\"=\"Confidence level\", \"description\"=\"Level at which to compute confidence intervals\"), \n \"digits\"=list(\"pretty.name\"=\"Number of digits\", \"description\"=\"Number of digits to display in results\"),\n \"adjust\"=list(\"pretty.name\"=\"Correction factor\", \"description\"=\"Constant c that is added to the entries of a two-by-two table.\"),\n \"to\"=list(\"pretty.name\"=\"Add correction factor to\", \"description\"=\"When Add correction factor is set to \\\"only 0\\\", the correction factor\n is added to all cells of each two-by-two table that contains at leason one zero. 
When set to \\\"all\\\", the correction factor\n is added to all two-by-two tables if at least one table contains a zero.\")\n )\n}\n\nbinary.fixed.inv.var.overall <- function(results) {\n # this parses out the overall from the computed result\n res <- results$res\n}\n\n############################################\n# binary fixed effects -- mantel haenszel #\n############################################\nbinary.fixed.mh <- function(binary.data, params){ \n # assert that the argument is the correct type\n if (!(\"BinaryData\" %in% class(binary.data)))\n stop(\"Binary data expected.\") \n \n results <- NULL\n input.params <- params\n \n if (length(binary.data@g1O1) == 1 || length(binary.data@y) == 1){\n res <- get.res.for.one.binary.study(binary.data, params)\n # Package res for use by overall method.\n results <- list(\"Summary\"=res, # shouldn't assume this is any more than a string\n \"res\"=res) # actual metafor output\n } else {\n res<-rma.mh(ai=binary.data@g1O1, bi=binary.data@g1O2, \n ci=binary.data@g2O1, di=binary.data@g2O2,\n [email protected],\n level=params$conf.level,\n digits=params$digits,\n measure=params$measure,\n add=c(params$adjust, 0),\n to=c(as.character(params$to), \"none\"))\n pure.res <- res\n if (is.null(binary.data@y) || is.null(binary.data@SE)) {\n # compute point estimates for plot.data in case they are missing\n binary.data <- compute.bin.point.estimates(binary.data, params)\n }\n # Create forest plot and list to display summary of results\n metric.name <- pretty.metric.name(as.character(params$measure))\n model.title <- paste(\"Binary Fixed-Effect Model - Mantel Haenszel\\n\\nMetric: \", metric.name, sep=\"\")\n # Create results display tables\n summary.disp <- create.summary.disp(binary.data, params, res, model.title)\n forest.path <- paste(params$fp_outpath, sep=\"\")\n plot.data <- create.plot.data.binary(binary.data, params, res)\n changed.params <- plot.data$changed.params\n\t\t\n\t\tforest.plot.params.path <- \"\"\n\t\tif (is.null(params$supress.output) || !params$supress.output) {\n\t # list of changed params values\n\t params.changed.in.forest.plot <- forest.plot(forest.data=plot.data, outpath=forest.path)\n\t changed.params <- c(changed.params, params.changed.in.forest.plot)\n\t params[names(changed.params)] <- changed.params\n\t # dump the forest plot params to disk; return path to\n\t # this .Rdata for later use\n\t forest.plot.params.path <- save.data(binary.data, res, params, plot.data)\n\t\t}\n \n\t\t\n\t\t\n\t\t\n # Now we package the results in a dictionary (technically, a named \n # vector). In particular, there are two fields that must be returned; \n # a dictionary of images (mapping titles to image paths) and a list of texts\n # (mapping titles to pretty-printed text). In this case we have only one \n # of each. \n # \n #references <- \"Mantel, N., & Haenszel, W. (1959) Statistical aspects of the analysis of data from retrospective studies of disease. 
Journal of the National Cancer Institute, 22, 719-748.\"\n plot.params.paths <- c(\"Forest Plot\"=forest.plot.params.path)\n images <- c(\"Forest Plot\"=forest.path)\n plot.names <- c(\"forest plot\"=\"forest_plot\")\n pure.res$weights <- weights(res)\n results <- list(\"input_data\"=binary.data,\n \"input_params\"=input.params,\n \"images\"=images,\n \"Summary\"=capture.output.and.collapse(summary.disp), \n \"plot_names\"=plot.names,\n \"plot_params_paths\"=plot.params.paths,\n \"res\"=pure.res,\n \"res.info\"=binary.fixed.mh.value.info(),\n \"weights\"=weights(res))\n \n \n }\n \n references <- \"Mantel, N., & Haenszel, W. (1959) Statistical aspects of the analysis of data from retrospective studies of disease. Journal of the National Cancer Institute, 22, 719-748.\"\n results[[\"References\"]] = references\n \n results\n}\n\n\nbinary.fixed.mh.value.info <- function() {\n list(\n b = list(type=\"vector\", description='estimated coefficients of the model.'),\n se = list(type=\"vector\", description='standard errors of the coefficients.'),\n zval = list(type=\"vector\", description='test statistics of the coefficients.'),\n pval = list(type=\"vector\", description='p-values for the test statistics.'),\n ci.lb = list(type=\"vector\", description='lower bound of the confidence intervals for the coefficients.'),\n ci.ub = list(type=\"vector\", description='upper bound of the confidence intervals for the coefficients.'),\n QE = list(type=\"vector\", description='test statistic for the test of (residual) heterogeneity.'),\n QEp = list(type=\"vector\", description='p-value for the test of (residual) heterogeneity.'),\n MH = list(type=\"vector\", description='Cochran-Mantel-Haenszel test statistic (measure=\"OR\") or Mantel-Haenszel test statistic (measure=\"IRR\").'),\n MHp = list(type=\"vector\", description='corresponding p-value'),\n TA = list(type=\"vector\", description='Tarone’s heterogeneity test statistic (only when measure=\"OR\").'), \n TAp = list(type=\"vector\", description='corresponding p-value (only when measure=\"OR\").'), \n k = list(type=\"vector\", description='number of tables included in the analysis.'), \n yi = list(type=\"vector\", description='the vector of outcomes'),\n vi = list(type=\"vector\", description='the corresponding sample variances'),\n fit.stats= list(type=\"data.frame\", description='a list with the log-likelihood, deviance, AIC, BIC, and AICc values under the unrestricted and restricted likelihood.'),\n \n # not part of rma.mh default output\n weights = list(type=\"vector\", description=\"weights in % given to the observed effects\")\n)\n}\n \nbinary.fixed.mh.parameters <- function(){\n # parameters\n apply_adjustment_to = c(\"only0\", \"all\")\n \n params <- list(\"conf.level\"=\"float\", \"digits\"=\"int\",\n \"adjust\"=\"float\", \"to\"=apply_adjustment_to)\n \n # default values\n defaults <- list(\"conf.level\"=95, \"digits\"=3, \"adjust\"=.5, \"to\"=\"only0\")\n \n var_order = c(\"conf.level\", \"digits\", \"adjust\", \"to\")\n \n # constraints\n parameters <- list(\"parameters\"=params, \"defaults\"=defaults, \"var_order\"=var_order)\n}\n\nbinary.fixed.mh.pretty.names <- function() {\n pretty.names <- list(\"pretty.name\"=\"Binary Fixed-Effect Mantel Haenszel\", \n \"description\" = \"Performs fixed-effect meta-analysis using the Mantel Haenszel method.\",\n \"conf.level\"=list(\"pretty.name\"=\"Confidence level\", \"description\"=\"Level at which to compute confidence intervals\"), \n \"digits\"=list(\"pretty.name\"=\"Number of digits\", 
\"description\"=\"Number of digits to display in results\"),\n \"adjust\"=list(\"pretty.name\"=\"Correction factor\", \"description\"=\"Constant c that is added to the entries of a two-by-two table.\"),\n \"to\"=list(\"pretty.name\"=\"Add correction factor to\", \"description\"=\"When Add correction factor is set to \\\"only 0\\\", the correction factor\n is added to all cells of each two-by-two table that contains at leason one zero. When set to \\\"all\\\", the correction factor\n is added to all two-by-two tables if at least one table contains a zero.\")\n )\n}\n\n\nbinary.fixed.mh.is.feasible <- function(binary.data, metric){\n # only feasible if we have raw (2x2) data for all studies\n # in this case the metric is ignored\n length(binary.data@g1O1)==length(binary.data@g1O2) &&\n length(binary.data@g1O2)==length(binary.data@g2O1) &&\n length(binary.data@g2O1)==length(binary.data@g2O2) &&\n length(binary.data@g1O1) > 0\n}\n\nbinary.fixed.mh.is.feasible.for.funnel <- function() {\n FALSE\n}\n\nbinary.fixed.mh.overall <- function(results) {\n # this parses out the overall from the computed result\n res <- results$res\n}\n \n##################################################\n# binary fixed effects -- Peto #\n##################################################\nbinary.fixed.peto <- function(binary.data, params) { \n # assert that the argument is the correct type\n if (!(\"BinaryData\" %in% class(binary.data)))\n stop(\"Binary data expected.\") \n \n input.params <- params\n \n if (length(binary.data@g1O1) == 1) {\n res <- get.res.for.one.binary.study(binary.data, params)\n # Package res for use by overall method.\n results <- list(\"Summary\"=res,\n \"res\"=res)\n } else {\n res <- rma.peto(ai=binary.data@g1O1, bi=binary.data@g1O2, \n ci=binary.data@g2O1, di=binary.data@g2O2,\n [email protected],\n level=params$conf.level,\n digits=params$digits,\n add=c(params$adjust,params$adjust),\n to=c(as.character(params$to), as.character(params$to)),\n drop00 = FALSE) # needed in metafor 1.8, unknown in 1.6\n pure.res <- res\n # Corrected values for y and SE\n binary.data@y <- res$yi\n binary.data@SE <- sqrt(res$vi)\n \n if (is.null(binary.data@y) || is.null(binary.data@SE)) {\n # compute point estimates for plot.data in case they are missing\n binary.data <- compute.bin.point.estimates(binary.data, params)\n }\n \n # Create forest plot and list to display summary of results\n metric.name <- pretty.metric.name(as.character(params$measure))\n model.title <- \"Binary Fixed-Effect Model - Peto\\n\\nMetric: Odds Ratio\"\n # Create results display tables\n summary.disp <- create.summary.disp(binary.data, params, res, model.title)\n #\n # generate forest plot \n #\n forest.path <- paste(params$fp_outpath, sep=\"\")\n plot.data <- create.plot.data.binary(binary.data, params, res)\n changed.params <- plot.data$changed.params\n\t\t\n\t\tforest.plot.params.path <- \"\"\n\t\tif (is.null(params$supress.output) || !params$supress.output) {\n\t # list of changed params values\n\t params.changed.in.forest.plot <- forest.plot(forest.data=plot.data, outpath=forest.path)\n\t changed.params <- c(changed.params, params.changed.in.forest.plot)\n\t params[names(changed.params)] <- changed.params\n\t # dump the forest plot params to disk; return path to\n\t # this .Rdata for later use\n\t forest.plot.params.path <- save.data(binary.data, res, params, plot.data)\n\t\t}\n \n\t\t\n\t\t\n # Now we package the results in a dictionary (technically, a named \n # vector). 
In particular, there are two fields that must be returned; \n # a dictionary of images (mapping titles to image paths) and a list of texts\n # (mapping titles to pretty-printed text). In this case we have only one \n # of each. \n #\n plot.params.paths <- c(\"Forest Plot\"=forest.plot.params.path)\n images <- c(\"Forest Plot\"=forest.path)\n plot.names <- c(\"forest plot\"=\"forest_plot\")\n pure.res$weights <- weights(res)\n results <- list(\"input_data\"=binary.data,\n \"input_params\"=input.params,\n \"images\"=images,\n \"Summary\"=capture.output.and.collapse(summary.disp),\n \"plot_names\"=plot.names,\n \"plot_params_paths\"=plot.params.paths,\n \"res\"=pure.res, # if res is here, res.info must be too\n \"res.info\"=binary.fixed.peto.value.info(),\n \"weights\"=weights(res))\n }\n \n references <- \"Fixed Peto: Yusuf, S., Peto, R., Lewis, J., Collins, R., & Sleight, P. (1985). Beta blockade during and after myocardial infarction: An overview of the randomized trials. Progress in Cardiovascular Disease, 27, 335-371.\"\n results[[\"References\"]] <- references\n results\n}\n\nbinary.fixed.peto.value.info <- function() {\n list(\n b = list(type=\"vector\", description='estimated coefficients of the model.'),\n se = list(type=\"vector\", description='standard errors of the coefficients.'),\n zval = list(type=\"vector\", description='test statistics of the coefficients.'),\n pval = list(type=\"vector\", description='p-values for the test statistics.'),\n ci.lb = list(type=\"vector\", description='lower bound of the confidence intervals for the coefficients.'),\n ci.ub = list(type=\"vector\", description='upper bound of the confidence intervals for the coefficients.'),\n QE = list(type=\"vector\", description='test statistic for the test of heterogeneity.'),\n QEp = list(type=\"vector\", description='p-value for the test of heterogeneity.'),\n k = list(type=\"vector\", description='number of tables included in the analysis'),\n yi = list(type=\"vector\", description='the vector of outcomes'),\n vi = list(type=\"vector\", description='the corresponding sample variances'),\n fit.stats= list(type=\"data.frame\", description='a list with the log-likelihood, deviance, AIC, BIC, and AICc values under the unrestricted and restricted likelihood.'),\n \n # not part of rma.peto output\n weights = list(type=\"vector\", description=\"weights in % given to the observed effects\")\n )\n}\n \nbinary.fixed.peto.parameters <- function(){\n # parameters\n apply_adjustment_to = c(\"only0\", \"all\")\n \n params <- list(\"conf.level\"=\"float\", \"digits\"=\"int\",\n \"adjust\"=\"float\", \"to\"=apply_adjustment_to)\n \n # default values\n defaults <- list(\"conf.level\"=95, \"digits\"=3, \"adjust\"=.5, \"to\"=\"only0\")\n \n var_order = c(\"conf.level\", \"digits\", \"adjust\", \"to\")\n \n parameters <- list(\"parameters\"=params, \"defaults\"=defaults, \"var_order\"=var_order)\n}\n\nbinary.fixed.peto.pretty.names <- function() {\n pretty.names <- list(\"pretty.name\"=\"Binary Fixed-Effect Peto\", \n \"description\" = \"Performs fixed-effect meta-analysis using the Peto method.\",\n \"conf.level\"=list(\"pretty.name\"=\"Confidence level\", \"description\"=\"Level at which to compute confidence intervals\"), \n \"digits\"=list(\"pretty.name\"=\"Number of digits\", \"description\"=\"Number of digits to display in results\"),\n \"adjust\"=list(\"pretty.name\"=\"Correction factor\", \"description\"=\"Constant c that is added to the entries of a two-by-two table.\"),\n \"to\"=list(\"pretty.name\"=\"Add correction 
factor to\", \"description\"=\"When Add correction factor is set to \\\"only 0\\\", the correction factor\n is added to all cells of each two-by-two table that contains at leason one zero. When set to \\\"all\\\", the correction factor\n is added to all two-by-two tables if at least one table contains a zero.\")\n )\n}\n\nbinary.fixed.peto.is.feasible <- function(binary.data, metric){\n # only feasible if we have raw (2x2) data for all studies\n # and the metric is `OR'\n metric == \"OR\" &&\n length(binary.data@g1O1)==length(binary.data@g1O2) &&\n length(binary.data@g1O2)==length(binary.data@g2O1) &&\n length(binary.data@g2O1)==length(binary.data@g2O2) &&\n length(binary.data@g1O1) > 0\n}\n\nbinary.fixed.peto.is.feasible.for.funnel <- function() {\n FALSE\n}\n\nbinary.fixed.peto.overall <- function(results) {\n # this parses out the overall from the computed result\n res <- results$res\n}\n\n\n##################################\n# binary random effects #\n##################################\nbinary.random <- function(binary.data, params) { \n # assert that the argument is the correct type\n if (!(\"BinaryData\" %in% class(binary.data))) stop(\"Binary data expected.\")\n \n results <- NULL\n input.params <- params\n \n if (length(binary.data@g1O1) == 1 || length(binary.data@y) == 1){\n res <- get.res.for.one.binary.study(binary.data, params)\n # Package res for use by overall method.\n results <- list(\"Summary\"=res,\n \"res\"=res)\n } else { \n # call out to the metafor package\n res<-rma.uni(yi=binary.data@y, sei=binary.data@SE, \n [email protected],\n method=params$rm.method, level=params$conf.level,\n digits=params$digits,\n add=c(params$adjust,params$adjust),\n to=as.character(params$to))\n ##drop00 = FALSE) # needed in metafor 1.8, unknown in 1.6\n pure.res <- res # store res before it gets messed with\n if (is.null(binary.data@y) || is.null(binary.data@SE)) {\n # compute point estimates for plot.data in case they are missing\n binary.data <- compute.bin.point.estimates(binary.data, params)\n }\n # Create forest plot and list to display summary of results\n #\n metric.name <- pretty.metric.name(as.character(params$measure))\n model.title <- paste(\"Binary Random-Effects Model\\n\\nMetric: \", metric.name, sep=\"\")\n \n # Create results display tables\n summary.disp <- create.summary.disp(binary.data, params, res, model.title)\n #\n # generate forest plot \n \n forest.path <- paste(params$fp_outpath, sep=\"\")\n plot.data <- create.plot.data.binary(binary.data, params, res)\n changed.params <- plot.data$changed.params\n\t\t\n\t\tforest.plot.params.path <- \"\"\n\t\tif (is.null(params$supress.output) || !params$supress.output) {\n\t # list of changed params values\n\t params.changed.in.forest.plot <- forest.plot(forest.data=plot.data, outpath=forest.path)\n\t changed.params <- c(changed.params, params.changed.in.forest.plot)\n\t params[names(changed.params)] <- changed.params\n\t # dump the forest plot params to disk; return path to\n\t # this .Rdata for later use\n\t forest.plot.params.path <- save.data(binary.data, res, params, plot.data)\n\t\t}\n \n\t\t\n # Now we package the results in a dictionary (technically, a named \n # vector). In particular, there are two fields that must be returned; \n # a dictionary of images (mapping titles to image paths) and a list of texts\n # (mapping titles to pretty-printed text). In this case we have only one \n # of each. 
\n # \n plot.params.paths <- c(\"Forest Plot\"=forest.plot.params.path)\n images <- c(\"Forest Plot\"=forest.path)\n plot.names <- c(\"forest plot\"=\"forest_plot\")\n pure.res$weights <- weights(res) # not pure anymore, oh well\n results <- list(\"input_data\"=binary.data, # the data that was given to the routine in the first place\n \"input_params\"=input.params,\n \"images\"=images,\n \"Summary\"=capture.output.and.collapse(summary.disp),\n \"plot_names\"=plot.names,\n \"plot_params_paths\"=plot.params.paths,\n \"res\"=pure.res, # the results directly from metafor in order to extract values of interests\n \"res.info\"=binary.random.value.info(),\n \"weights\"=weights(res))\n }\n \n #references <- \"this is a placeholder for binary random reference\"\n #references <- \"\"\n #results[[\"References\"]] <- references\n results\n}\n\n# Returns list mapping name-->type for the pure results output by metafor\nbinary.random.value.info <- function() {\n rma.uni.value.info()\n}\n\nbinary.random.is.feasible.for.funnel <- function () {\n TRUE\n}\n\n\nbinary.random.parameters <- function(){\n # parameters\n apply_adjustment_to = c(\"only0\", \"all\")\n rm_method_ls <- c(\"HE\", \"DL\", \"SJ\", \"ML\", \"REML\", \"EB\")\n params <- list(\"rm.method\"=rm_method_ls, \"conf.level\"=\"float\", \"digits\"=\"int\",\n \"adjust\"=\"float\", \"to\"=apply_adjustment_to)\n \n # default values\n defaults <- list(\"rm.method\"=\"DL\", \"conf.level\"=95, \"digits\"=3, \"adjust\"=.5, \"to\"=\"only0\")\n \n var_order <- c(\"rm.method\", \"conf.level\", \"digits\", \"adjust\", \"to\")\n parameters <- list(\"parameters\"=params, \"defaults\"=defaults, \"var_order\"=var_order)\n}\n\nbinary.random.pretty.names <- function() {\n # sort of redundant to have both this and rm_method_ls but whatever for now...\n rm_method_names <- list(\n HE=\"Hedges-Olkin\",\n DL = \"DerSimonian-Laird\",\n SJ = \"Sidik-Jonkman\",\n ML = \"Maximum Likelihood\",\n REML = \"Restricted Maximum Likelihood\", \n EB = \"Empirical Bayes\")\n \n pretty.names <- list(\"pretty.name\"=\"Binary Random-Effects\", \n \"description\" = \"Performs random-effects meta-analysis.\",\n \"rm.method\"=list(\"pretty.name\"=\"Random-Effects method\", \"description\"=\"Method for estimating between-studies heterogeneity\", \"rm.method.names\"=rm_method_names), \n \"conf.level\"=list(\"pretty.name\"=\"Confidence level\", \"description\"=\"Level at which to compute confidence intervals\"), \n \"digits\"=list(\"pretty.name\"=\"Number of digits of precision to display\", \"description\"=\"Number of digits to display in results\"),\n \"adjust\"=list(\"pretty.name\"=\"Correction factor\", \"description\"=\"Constant c that is added to the entries of a two-by-two table.\"),\n \"to\"=list(\"pretty.name\"=\"Cells to which correction factor should be added\", \"description\"=\"When Add correction factor is set to \\\"only 0\\\", the correction factor\n is added to all cells of each two-by-two table that contains at leason one zero. 
When set to \\\"all\\\", the correction factor\n is added to all two-by-two tables if at least one table contains a zero.\")\n )\n}\n\nbinary.random.overall <- function(results) {\n # this parses out the overall from the computed result\n res <- results$res\n}" }, { "alpha_fraction": 0.25, "alphanum_fraction": 0.3233333230018616, "avg_line_length": 31.33333396911621, "blob_id": "99c58713259375dd2c39a01c389083841d14abfe", "content_id": "0c32c147294cd31017e900de441bb31031947125", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 300, "license_type": "no_license", "max_line_length": 61, "num_lines": 9, "path": "/src/R/HSROC/R/exp.b.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "exp.b <-\r\nfunction (b, y, a, d1, e, d0, r) \r\n{\r\n f.b = exp(b * (-1 + sum(0.5 * y))) * exp(-0.5 * exp(b) * \r\n sum(y * (r + 0.5 * (a + d1 * e))^2)) * exp(-b * (1 + \r\n sum(0.5 * (1 - y)))) * exp(-0.5 * exp(-b) * sum((1 - \r\n y) * (r - 0.5 * (a + d0 * e))^2))\r\n return(f.b)\r\n}\r\n" }, { "alpha_fraction": 0.6710429787635803, "alphanum_fraction": 0.695648193359375, "avg_line_length": 58.35694885253906, "blob_id": "8f08947b299d23fc550fe1a92008c636a4236bdf", "content_id": "a9ee4b2d1575f8c3e693e0c3f16c02aa154fb56d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21784, "license_type": "no_license", "max_line_length": 195, "num_lines": 367, "path": "/src/forms/ui_continuous_data_form.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'continuous_data_form.ui'\n#\n# Created: Mon May 20 09:43:45 2013\n# by: PyQt4 UI code generator 4.10\n#\n# WARNING! 
All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_ContinuousDataForm(object):\n def setupUi(self, ContinuousDataForm):\n ContinuousDataForm.setObjectName(_fromUtf8(\"ContinuousDataForm\"))\n ContinuousDataForm.resize(570, 565)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Minimum)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(ContinuousDataForm.sizePolicy().hasHeightForWidth())\n ContinuousDataForm.setSizePolicy(sizePolicy)\n ContinuousDataForm.setMinimumSize(QtCore.QSize(570, 0))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n ContinuousDataForm.setFont(font)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(_fromUtf8(\":/misc/meta.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n ContinuousDataForm.setWindowIcon(icon)\n self.verticalLayout_2 = QtGui.QVBoxLayout(ContinuousDataForm)\n self.verticalLayout_2.setObjectName(_fromUtf8(\"verticalLayout_2\"))\n self.simple_table = QtGui.QTableWidget(ContinuousDataForm)\n self.simple_table.setMinimumSize(QtCore.QSize(550, 80))\n self.simple_table.setMaximumSize(QtCore.QSize(550, 80))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.simple_table.setFont(font)\n self.simple_table.setFrameShape(QtGui.QFrame.NoFrame)\n self.simple_table.setFrameShadow(QtGui.QFrame.Plain)\n self.simple_table.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.simple_table.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.simple_table.setAlternatingRowColors(True)\n self.simple_table.setGridStyle(QtCore.Qt.DashLine)\n self.simple_table.setRowCount(2)\n self.simple_table.setColumnCount(8)\n self.simple_table.setObjectName(_fromUtf8(\"simple_table\"))\n item = QtGui.QTableWidgetItem()\n self.simple_table.setVerticalHeaderItem(0, item)\n item = QtGui.QTableWidgetItem()\n self.simple_table.setVerticalHeaderItem(1, item)\n item = QtGui.QTableWidgetItem()\n self.simple_table.setHorizontalHeaderItem(0, item)\n item = QtGui.QTableWidgetItem()\n self.simple_table.setHorizontalHeaderItem(1, item)\n item = QtGui.QTableWidgetItem()\n self.simple_table.setHorizontalHeaderItem(2, item)\n item = QtGui.QTableWidgetItem()\n self.simple_table.setHorizontalHeaderItem(3, item)\n item = QtGui.QTableWidgetItem()\n self.simple_table.setHorizontalHeaderItem(4, item)\n item = QtGui.QTableWidgetItem()\n self.simple_table.setHorizontalHeaderItem(5, item)\n item = QtGui.QTableWidgetItem()\n self.simple_table.setHorizontalHeaderItem(6, item)\n item = QtGui.QTableWidgetItem()\n self.simple_table.setHorizontalHeaderItem(7, item)\n self.verticalLayout_2.addWidget(self.simple_table)\n self.grp_box_pre_post = QtGui.QGroupBox(ContinuousDataForm)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Minimum)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.grp_box_pre_post.sizePolicy().hasHeightForWidth())\n self.grp_box_pre_post.setSizePolicy(sizePolicy)\n self.grp_box_pre_post.setObjectName(_fromUtf8(\"grp_box_pre_post\"))\n 
self.verticalLayout = QtGui.QVBoxLayout(self.grp_box_pre_post)\n self.verticalLayout.setObjectName(_fromUtf8(\"verticalLayout\"))\n self.g1_pre_post_table = QtGui.QTableWidget(self.grp_box_pre_post)\n self.g1_pre_post_table.setMinimumSize(QtCore.QSize(490, 81))\n self.g1_pre_post_table.setMaximumSize(QtCore.QSize(490, 81))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.g1_pre_post_table.setFont(font)\n self.g1_pre_post_table.setFrameShape(QtGui.QFrame.NoFrame)\n self.g1_pre_post_table.setFrameShadow(QtGui.QFrame.Plain)\n self.g1_pre_post_table.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.g1_pre_post_table.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.g1_pre_post_table.setAlternatingRowColors(True)\n self.g1_pre_post_table.setShowGrid(True)\n self.g1_pre_post_table.setGridStyle(QtCore.Qt.DashLine)\n self.g1_pre_post_table.setRowCount(2)\n self.g1_pre_post_table.setColumnCount(7)\n self.g1_pre_post_table.setObjectName(_fromUtf8(\"g1_pre_post_table\"))\n item = QtGui.QTableWidgetItem()\n self.g1_pre_post_table.setVerticalHeaderItem(0, item)\n item = QtGui.QTableWidgetItem()\n self.g1_pre_post_table.setVerticalHeaderItem(1, item)\n item = QtGui.QTableWidgetItem()\n self.g1_pre_post_table.setHorizontalHeaderItem(0, item)\n item = QtGui.QTableWidgetItem()\n self.g1_pre_post_table.setHorizontalHeaderItem(1, item)\n item = QtGui.QTableWidgetItem()\n self.g1_pre_post_table.setHorizontalHeaderItem(2, item)\n item = QtGui.QTableWidgetItem()\n self.g1_pre_post_table.setHorizontalHeaderItem(3, item)\n item = QtGui.QTableWidgetItem()\n self.g1_pre_post_table.setHorizontalHeaderItem(4, item)\n item = QtGui.QTableWidgetItem()\n self.g1_pre_post_table.setHorizontalHeaderItem(5, item)\n item = QtGui.QTableWidgetItem()\n self.g1_pre_post_table.setHorizontalHeaderItem(6, item)\n self.verticalLayout.addWidget(self.g1_pre_post_table)\n self.grp_1_lbl = QtGui.QLabel(self.grp_box_pre_post)\n font = QtGui.QFont()\n font.setPointSize(7)\n font.setBold(True)\n font.setItalic(True)\n font.setWeight(75)\n self.grp_1_lbl.setFont(font)\n self.grp_1_lbl.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.grp_1_lbl.setObjectName(_fromUtf8(\"grp_1_lbl\"))\n self.verticalLayout.addWidget(self.grp_1_lbl)\n self.g2_pre_post_table = QtGui.QTableWidget(self.grp_box_pre_post)\n self.g2_pre_post_table.setMinimumSize(QtCore.QSize(490, 81))\n self.g2_pre_post_table.setMaximumSize(QtCore.QSize(490, 81))\n self.g2_pre_post_table.setFrameShape(QtGui.QFrame.NoFrame)\n self.g2_pre_post_table.setFrameShadow(QtGui.QFrame.Plain)\n self.g2_pre_post_table.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.g2_pre_post_table.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.g2_pre_post_table.setAlternatingRowColors(True)\n self.g2_pre_post_table.setGridStyle(QtCore.Qt.DashLine)\n self.g2_pre_post_table.setRowCount(2)\n self.g2_pre_post_table.setColumnCount(7)\n self.g2_pre_post_table.setObjectName(_fromUtf8(\"g2_pre_post_table\"))\n item = QtGui.QTableWidgetItem()\n self.g2_pre_post_table.setVerticalHeaderItem(0, item)\n item = QtGui.QTableWidgetItem()\n self.g2_pre_post_table.setVerticalHeaderItem(1, item)\n item = QtGui.QTableWidgetItem()\n self.g2_pre_post_table.setHorizontalHeaderItem(0, item)\n item = QtGui.QTableWidgetItem()\n self.g2_pre_post_table.setHorizontalHeaderItem(1, item)\n item = QtGui.QTableWidgetItem()\n self.g2_pre_post_table.setHorizontalHeaderItem(2, item)\n item = QtGui.QTableWidgetItem()\n 
self.g2_pre_post_table.setHorizontalHeaderItem(3, item)\n item = QtGui.QTableWidgetItem()\n self.g2_pre_post_table.setHorizontalHeaderItem(4, item)\n item = QtGui.QTableWidgetItem()\n self.g2_pre_post_table.setHorizontalHeaderItem(5, item)\n item = QtGui.QTableWidgetItem()\n self.g2_pre_post_table.setHorizontalHeaderItem(6, item)\n self.verticalLayout.addWidget(self.g2_pre_post_table)\n self.grp_2_lbl = QtGui.QLabel(self.grp_box_pre_post)\n font = QtGui.QFont()\n font.setPointSize(7)\n font.setBold(True)\n font.setItalic(True)\n font.setWeight(75)\n self.grp_2_lbl.setFont(font)\n self.grp_2_lbl.setObjectName(_fromUtf8(\"grp_2_lbl\"))\n self.verticalLayout.addWidget(self.grp_2_lbl)\n self.horizontalLayout_3 = QtGui.QHBoxLayout()\n self.horizontalLayout_3.setObjectName(_fromUtf8(\"horizontalLayout_3\"))\n spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.horizontalLayout_3.addItem(spacerItem)\n self.label = QtGui.QLabel(self.grp_box_pre_post)\n self.label.setObjectName(_fromUtf8(\"label\"))\n self.horizontalLayout_3.addWidget(self.label)\n self.correlation_pre_post = QtGui.QLineEdit(self.grp_box_pre_post)\n self.correlation_pre_post.setMinimumSize(QtCore.QSize(40, 0))\n self.correlation_pre_post.setMaximumSize(QtCore.QSize(40, 16777215))\n self.correlation_pre_post.setObjectName(_fromUtf8(\"correlation_pre_post\"))\n self.horizontalLayout_3.addWidget(self.correlation_pre_post)\n self.verticalLayout.addLayout(self.horizontalLayout_3)\n self.verticalLayout_2.addWidget(self.grp_box_pre_post)\n self.horizontalLayout_6 = QtGui.QHBoxLayout()\n self.horizontalLayout_6.setObjectName(_fromUtf8(\"horizontalLayout_6\"))\n self.clear_Btn = QtGui.QPushButton(ContinuousDataForm)\n self.clear_Btn.setObjectName(_fromUtf8(\"clear_Btn\"))\n self.horizontalLayout_6.addWidget(self.clear_Btn)\n spacerItem1 = QtGui.QSpacerItem(108, 18, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.horizontalLayout_6.addItem(spacerItem1)\n self.back_calc_btn = QtGui.QPushButton(ContinuousDataForm)\n self.back_calc_btn.setEnabled(False)\n self.back_calc_btn.setObjectName(_fromUtf8(\"back_calc_btn\"))\n self.horizontalLayout_6.addWidget(self.back_calc_btn)\n self.verticalLayout_2.addLayout(self.horizontalLayout_6)\n self.horizontalLayout_2 = QtGui.QHBoxLayout()\n self.horizontalLayout_2.setObjectName(_fromUtf8(\"horizontalLayout_2\"))\n self.label_13 = QtGui.QLabel(ContinuousDataForm)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.label_13.setFont(font)\n self.label_13.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft)\n self.label_13.setObjectName(_fromUtf8(\"label_13\"))\n self.horizontalLayout_2.addWidget(self.label_13)\n self.effect_cbo_box = QtGui.QComboBox(ContinuousDataForm)\n self.effect_cbo_box.setMinimumSize(QtCore.QSize(76, 20))\n self.effect_cbo_box.setMaximumSize(QtCore.QSize(76, 20))\n self.effect_cbo_box.setObjectName(_fromUtf8(\"effect_cbo_box\"))\n self.horizontalLayout_2.addWidget(self.effect_cbo_box)\n spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.horizontalLayout_2.addItem(spacerItem2)\n self.verticalLayout_2.addLayout(self.horizontalLayout_2)\n self.gridLayout = QtGui.QGridLayout()\n self.gridLayout.setObjectName(_fromUtf8(\"gridLayout\"))\n spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.gridLayout.addItem(spacerItem3, 0, 0, 2, 1)\n self.ci_label = QtGui.QLabel(ContinuousDataForm)\n 
self.ci_label.setObjectName(_fromUtf8(\"ci_label\"))\n self.gridLayout.addWidget(self.ci_label, 0, 2, 1, 1)\n spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.gridLayout.addItem(spacerItem4, 0, 3, 2, 1)\n self.horizontalLayout_4 = QtGui.QHBoxLayout()\n self.horizontalLayout_4.setObjectName(_fromUtf8(\"horizontalLayout_4\"))\n self.label_14 = QtGui.QLabel(ContinuousDataForm)\n self.label_14.setMinimumSize(QtCore.QSize(0, 20))\n self.label_14.setMaximumSize(QtCore.QSize(16777215, 20))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.label_14.setFont(font)\n self.label_14.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft)\n self.label_14.setObjectName(_fromUtf8(\"label_14\"))\n self.horizontalLayout_4.addWidget(self.label_14)\n self.effect_txt_box = QtGui.QLineEdit(ContinuousDataForm)\n self.effect_txt_box.setMinimumSize(QtCore.QSize(50, 20))\n self.effect_txt_box.setMaximumSize(QtCore.QSize(50, 20))\n self.effect_txt_box.setObjectName(_fromUtf8(\"effect_txt_box\"))\n self.horizontalLayout_4.addWidget(self.effect_txt_box)\n self.gridLayout.addLayout(self.horizontalLayout_4, 1, 1, 1, 1)\n self.horizontalLayout_5 = QtGui.QHBoxLayout()\n self.horizontalLayout_5.setObjectName(_fromUtf8(\"horizontalLayout_5\"))\n self.label_15 = QtGui.QLabel(ContinuousDataForm)\n self.label_15.setMaximumSize(QtCore.QSize(16777215, 20))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.label_15.setFont(font)\n self.label_15.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft)\n self.label_15.setObjectName(_fromUtf8(\"label_15\"))\n self.horizontalLayout_5.addWidget(self.label_15)\n self.low_txt_box = QtGui.QLineEdit(ContinuousDataForm)\n self.low_txt_box.setMinimumSize(QtCore.QSize(50, 20))\n self.low_txt_box.setMaximumSize(QtCore.QSize(50, 20))\n self.low_txt_box.setObjectName(_fromUtf8(\"low_txt_box\"))\n self.horizontalLayout_5.addWidget(self.low_txt_box)\n self.label_2 = QtGui.QLabel(ContinuousDataForm)\n self.label_2.setObjectName(_fromUtf8(\"label_2\"))\n self.horizontalLayout_5.addWidget(self.label_2)\n self.high_txt_box = QtGui.QLineEdit(ContinuousDataForm)\n self.high_txt_box.setMinimumSize(QtCore.QSize(50, 20))\n self.high_txt_box.setMaximumSize(QtCore.QSize(50, 20))\n self.high_txt_box.setObjectName(_fromUtf8(\"high_txt_box\"))\n self.horizontalLayout_5.addWidget(self.high_txt_box)\n self.label_16 = QtGui.QLabel(ContinuousDataForm)\n self.label_16.setMaximumSize(QtCore.QSize(16777215, 20))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.label_16.setFont(font)\n self.label_16.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft)\n self.label_16.setObjectName(_fromUtf8(\"label_16\"))\n self.horizontalLayout_5.addWidget(self.label_16)\n self.gridLayout.addLayout(self.horizontalLayout_5, 1, 2, 1, 1)\n self.verticalLayout_2.addLayout(self.gridLayout)\n self.buttonBox = QtGui.QDialogButtonBox(ContinuousDataForm)\n self.buttonBox.setOrientation(QtCore.Qt.Horizontal)\n self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)\n self.buttonBox.setObjectName(_fromUtf8(\"buttonBox\"))\n self.verticalLayout_2.addWidget(self.buttonBox)\n\n self.retranslateUi(ContinuousDataForm)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\"accepted()\")), ContinuousDataForm.accept)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\"rejected()\")), 
ContinuousDataForm.reject)\n QtCore.QMetaObject.connectSlotsByName(ContinuousDataForm)\n\n def retranslateUi(self, ContinuousDataForm):\n ContinuousDataForm.setWindowTitle(_translate(\"ContinuousDataForm\", \"Continuous Data\", None))\n item = self.simple_table.verticalHeaderItem(0)\n item.setText(_translate(\"ContinuousDataForm\", \"group 1\", None))\n item = self.simple_table.verticalHeaderItem(1)\n item.setText(_translate(\"ContinuousDataForm\", \"group 2\", None))\n item = self.simple_table.horizontalHeaderItem(0)\n item.setText(_translate(\"ContinuousDataForm\", \"n\", None))\n item = self.simple_table.horizontalHeaderItem(1)\n item.setText(_translate(\"ContinuousDataForm\", \"mean\", None))\n item = self.simple_table.horizontalHeaderItem(2)\n item.setText(_translate(\"ContinuousDataForm\", \"sd\", None))\n item = self.simple_table.horizontalHeaderItem(3)\n item.setText(_translate(\"ContinuousDataForm\", \"se\", None))\n item = self.simple_table.horizontalHeaderItem(4)\n item.setText(_translate(\"ContinuousDataForm\", \"var\", None))\n item = self.simple_table.horizontalHeaderItem(5)\n item.setText(_translate(\"ContinuousDataForm\", \"pval\", None))\n item = self.simple_table.horizontalHeaderItem(6)\n item.setText(_translate(\"ContinuousDataForm\", \"low\", None))\n item = self.simple_table.horizontalHeaderItem(7)\n item.setText(_translate(\"ContinuousDataForm\", \"high\", None))\n self.grp_box_pre_post.setTitle(_translate(\"ContinuousDataForm\", \"pre / post\", None))\n item = self.g1_pre_post_table.verticalHeaderItem(0)\n item.setText(_translate(\"ContinuousDataForm\", \"pre\", None))\n item = self.g1_pre_post_table.verticalHeaderItem(1)\n item.setText(_translate(\"ContinuousDataForm\", \"post\", None))\n item = self.g1_pre_post_table.horizontalHeaderItem(0)\n item.setText(_translate(\"ContinuousDataForm\", \"n\", None))\n item = self.g1_pre_post_table.horizontalHeaderItem(1)\n item.setText(_translate(\"ContinuousDataForm\", \"mean\", None))\n item = self.g1_pre_post_table.horizontalHeaderItem(2)\n item.setText(_translate(\"ContinuousDataForm\", \"sd\", None))\n item = self.g1_pre_post_table.horizontalHeaderItem(3)\n item.setText(_translate(\"ContinuousDataForm\", \"se\", None))\n item = self.g1_pre_post_table.horizontalHeaderItem(4)\n item.setText(_translate(\"ContinuousDataForm\", \"var\", None))\n item = self.g1_pre_post_table.horizontalHeaderItem(5)\n item.setText(_translate(\"ContinuousDataForm\", \"low\", None))\n item = self.g1_pre_post_table.horizontalHeaderItem(6)\n item.setText(_translate(\"ContinuousDataForm\", \"high\", None))\n self.grp_1_lbl.setText(_translate(\"ContinuousDataForm\", \"<!DOCTYPE HTML PUBLIC \\\"-//W3C//DTD HTML 4.0//EN\\\" \\\"http://www.w3.org/TR/REC-html40/strict.dtd\\\">\\n\"\n\"<html><head><meta name=\\\"qrichtext\\\" content=\\\"1\\\" /><style type=\\\"text/css\\\">\\n\"\n\"p, li { white-space: pre-wrap; }\\n\"\n\"</style></head><body style=\\\" font-family:\\'Verdana\\'; font-size:8pt; font-weight:600; font-style:italic;\\\">\\n\"\n\"<p style=\\\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\\\"><span style=\\\" font-size:7pt;\\\">group 1</span></p></body></html>\", None))\n item = self.g2_pre_post_table.verticalHeaderItem(0)\n item.setText(_translate(\"ContinuousDataForm\", \"pre\", None))\n item = self.g2_pre_post_table.verticalHeaderItem(1)\n item.setText(_translate(\"ContinuousDataForm\", \"post\", None))\n item = self.g2_pre_post_table.horizontalHeaderItem(0)\n 
item.setText(_translate(\"ContinuousDataForm\", \"n\", None))\n item = self.g2_pre_post_table.horizontalHeaderItem(1)\n item.setText(_translate(\"ContinuousDataForm\", \"mean\", None))\n item = self.g2_pre_post_table.horizontalHeaderItem(2)\n item.setText(_translate(\"ContinuousDataForm\", \"sd\", None))\n item = self.g2_pre_post_table.horizontalHeaderItem(3)\n item.setText(_translate(\"ContinuousDataForm\", \"se\", None))\n item = self.g2_pre_post_table.horizontalHeaderItem(4)\n item.setText(_translate(\"ContinuousDataForm\", \"var\", None))\n item = self.g2_pre_post_table.horizontalHeaderItem(5)\n item.setText(_translate(\"ContinuousDataForm\", \"low\", None))\n item = self.g2_pre_post_table.horizontalHeaderItem(6)\n item.setText(_translate(\"ContinuousDataForm\", \"high\", None))\n self.grp_2_lbl.setText(_translate(\"ContinuousDataForm\", \"<!DOCTYPE HTML PUBLIC \\\"-//W3C//DTD HTML 4.0//EN\\\" \\\"http://www.w3.org/TR/REC-html40/strict.dtd\\\">\\n\"\n\"<html><head><meta name=\\\"qrichtext\\\" content=\\\"1\\\" /><style type=\\\"text/css\\\">\\n\"\n\"p, li { white-space: pre-wrap; }\\n\"\n\"</style></head><body style=\\\" font-family:\\'Verdana\\'; font-size:8pt; font-weight:600; font-style:italic;\\\">\\n\"\n\"<p style=\\\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\\\"><span style=\\\" font-size:7pt;\\\">group 2</span></p></body></html>\", None))\n self.label.setText(_translate(\"ContinuousDataForm\", \"correlation:\", None))\n self.correlation_pre_post.setText(_translate(\"ContinuousDataForm\", \"0.0\", None))\n self.clear_Btn.setText(_translate(\"ContinuousDataForm\", \"Clear Form\", None))\n self.back_calc_btn.setText(_translate(\"ContinuousDataForm\", \"back-calculate table\", None))\n self.label_13.setText(_translate(\"ContinuousDataForm\", \"effect\", None))\n self.ci_label.setText(_translate(\"ContinuousDataForm\", \"(X% confidence interval)\", None))\n self.label_14.setText(_translate(\"ContinuousDataForm\", \"est.\", None))\n self.label_15.setText(_translate(\"ContinuousDataForm\", \"[\", None))\n self.label_2.setText(_translate(\"ContinuousDataForm\", \",\", None))\n self.label_16.setText(_translate(\"ContinuousDataForm\", \"]\", None))\n\nimport icons_rc\n" }, { "alpha_fraction": 0.34285715222358704, "alphanum_fraction": 0.34285715222358704, "avg_line_length": 10.727272987365723, "blob_id": "f617d48f4cd90ea7419b80c283320a760f3332e5", "content_id": "cfebe775ea60f5f5458e6a5edf15032a97d2230e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 140, "license_type": "no_license", "max_line_length": 20, "num_lines": 11, "path": "/src/R/HSROC/R/limit.theta.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "limit.theta <-\r\nfunction (x, x.lim) \r\n{\r\n if (is.na(x)) {\r\n x = x.lim\r\n }\r\n else {\r\n x = x\r\n }\r\n return(x)\r\n}\r\n" }, { "alpha_fraction": 0.5413212776184082, "alphanum_fraction": 0.5462390184402466, "avg_line_length": 42.69324493408203, "blob_id": "857216e3ed781584dd0a0e86d305555689b4b69e", "content_id": "64e8652b63a276e55774a7592d1ff2029dad0bff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 32332, "license_type": "no_license", "max_line_length": 125, "num_lines": 740, "path": "/src/diagnostic_data_form.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "##################################################\n#\n# Byron C. 
Wallace \n# George Dietz \n# CEBM @ Brown \n# OpenMeta[analyst] \n#\n# ---\n# Diagnostic data form module; for flexible entry of diagnostic\n# outcome data.\n#\n##################################################\n\nimport copy\nfrom functools import partial\n\nfrom PyQt4.Qt import *\n\nimport meta_py_r\nfrom meta_globals import *\nimport calculator_routines as calc_fncs\nfrom forms.ui_diagnostic_data_form import Ui_DiagnosticDataForm\n\nBACK_CALCULATABLE_DIAGNOSTIC_EFFECTS = [\"Sens\", \"Spec\"]\n\nclass DiagnosticDataForm(QDialog, Ui_DiagnosticDataForm):\n def __init__(self, ma_unit, cur_txs, cur_group_str, conf_level=None, parent=None):\n super(DiagnosticDataForm, self).__init__(parent)\n self.setupUi(self)\n \n if conf_level is None:\n raise ValueError(\"Confidence level must be specified\")\n self.global_conf_level = conf_level\n self.mult = meta_py_r.get_mult_from_r(self.global_conf_level)\n \n self.setup_signals_and_slots()\n \n # Assign stuff\n self.ma_unit = ma_unit\n self.cur_groups = cur_txs\n self.group_str = cur_group_str\n self.cur_effect = \"Sens\" # arbitrary\n self.entry_widgets = [self.two_by_two_table, self.prevalence_txt_box,\n self.low_txt_box, self.high_txt_box,\n self.effect_txt_box,]\n self.text_boxes = [self.low_txt_box, self.high_txt_box,\n self.effect_txt_box, self.prevalence_txt_box]\n \n self.ci_label.setText(\"{0:.1f}% Confidence Interval\".format(self.global_conf_level))\n self.initialize_form()\n self.setup_inconsistency_checking()\n self.undoStack = QUndoStack(self)\n \n #self.setup_clear_button_palettes()\n self._update_raw_data() # ma_unit -> table\n self._populate_effect_cmbo_box() # make cmbo box entries for effects\n self.set_current_effect() # fill in current effect data in line edits\n self._update_data_table() # fill in the rest of the data table\n self.enable_back_calculation_btn()\n \n self.current_prevalence = self._get_prevalence_str()\n \n# # Color for clear_button_pallette\n# self.orig_palette = self.clear_Btn.palette()\n# self.pushme_palette = QPalette()\n# self.pushme_palette.setColor(QPalette.ButtonText,Qt.red)\n# self.set_clear_btn_color()\n \n# def setup_clear_button_palettes(self):\n# # Color for clear_button_pallette\n# self.orig_palette = self.clear_Btn.palette()\n# self.pushme_palette = QPalette()\n# self.pushme_palette.setColor(QPalette.ButtonText,Qt.red)\n# self.set_clear_btn_color()\n\n\n def initialize_form(self):\n ''' Initialize all cells to empty items '''\n \n nrows = self.two_by_two_table.rowCount()\n ncols = self.two_by_two_table.columnCount()\n \n for row in range(nrows):\n for col in range(ncols):\n self._set_val(row, col, None)\n\n for txt_box in self.text_boxes:\n txt_box.setText(QString(\"\"))\n \n def setup_signals_and_slots(self):\n QObject.connect(self.two_by_two_table, SIGNAL(\"cellChanged (int, int)\"), self.cell_changed) \n QObject.connect(self.effect_cbo_box, SIGNAL(\"currentIndexChanged(QString)\"), self.effect_changed) \n QObject.connect(self.clear_Btn, SIGNAL(\"clicked()\"), self.clear_form)\n QObject.connect(self.back_calc_Btn, SIGNAL(\"clicked()\"), lambda: self.enable_back_calculation_btn(engage=True))\n \n QObject.connect(self.effect_txt_box, SIGNAL(\"editingFinished()\"), lambda: self.val_changed(\"est\"))\n QObject.connect(self.low_txt_box, SIGNAL(\"editingFinished()\"), lambda: self.val_changed(\"lower\"))\n QObject.connect(self.high_txt_box, SIGNAL(\"editingFinished()\"), lambda: self.val_changed(\"upper\"))\n QObject.connect(self.prevalence_txt_box, SIGNAL(\"editingFinished()\"), lambda: 
self.val_changed(\"prevalence\"))\n\n # Add undo/redo actions\n undo = QAction(self)\n redo = QAction(self)\n undo.setShortcut(QKeySequence.Undo)\n redo.setShortcut(QKeySequence.Redo)\n self.addAction(undo)\n self.addAction(redo)\n QObject.connect(undo, SIGNAL(\"triggered()\"), self.undo)\n QObject.connect(redo, SIGNAL(\"triggered()\"), self.redo)\n \n \n @pyqtSignature(\"int, int, int, int\")\n def on_two_by_two_table_currentCellChanged(self,currentRow,currentColumn,previousRow,previousColumn):\n self.current_item_data = self._get_int(currentRow, currentColumn)\n print(\"Current item data @ (%d, %d) is: %s\" % (currentRow,\n currentColumn,\n str(self.current_item_data)))\n\n def setup_inconsistency_checking(self):\n # set-up inconsistency label\n inconsistency_palette = QPalette()\n inconsistency_palette.setColor(QPalette.WindowText,Qt.red)\n self.inconsistencyLabel.setPalette(inconsistency_palette)\n self.inconsistencyLabel.setVisible(False)\n \n def action_consistent_table(): \n self.inconsistencyLabel.setVisible(False)\n self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(True)\n def action_inconsistent_table():\n #show label, disable OK buttonbox button\n self.inconsistencyLabel.setVisible(True)\n self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)\n \n self.check_table_consistency = calc_fncs.ConsistencyChecker(\n fn_consistent=action_consistent_table,\n fn_inconsistent=action_inconsistent_table,\n table_2x2 = self.two_by_two_table)\n\n def _get_int(self, i, j):\n try:\n if not self._is_empty(i,j):\n int_val = int(float(self.two_by_two_table.item(i, j).text()))\n return int_val\n except:\n # Should never appear....\n msg = \"Could not convert %s to integer\" % self.two_by_two_table.item(i, j)\n QMessageBox.warning(self.parent(), \"whoops\", msg)\n raise Exception(\"Could not convert %s to int\" % self.two_by_two_table.item(i, j))\n \n def cell_data_invalid(self, celldata_string):\n # ignore blank entries\n if celldata_string.trimmed() == \"\" or celldata_string is None:\n return None\n\n if not is_a_float(celldata_string):\n return \"Raw data needs to be numeric.\"\n\n if not is_an_int(celldata_string):\n return \"Expecting count data -- you provided a float (?)\"\n\n if int(celldata_string) < 0:\n return \"Counts cannot be negative.\"\n return None\n\n def _is_empty(self, i, j):\n val = self.two_by_two_table.item(i,j)\n return val is None or val.text() == \"\" or val.text() == None\n def _is_invalid(self, i, j):\n val = self.two_by_two_table.item(i,j)\n try:\n int(val.text())\n except:\n return True\n return False\n \n def _is_txt_box_invalid(self, txt_box):\n val = txt_box.text()\n empty = val in EMPTY_VALS\n return is_NaN(val) or empty or (not is_a_float(val))\n \n def _set_val(self, row, col, val):\n if is_NaN(val): # get out quick\n print \"%s is not a number\" % val\n return\n \n try:\n str_val = \"\" if val in EMPTY_VALS else str(int(val))\n self.two_by_two_table.blockSignals(True)\n if self.two_by_two_table.item(row, col) == None:\n self.two_by_two_table.setItem(row, col, QTableWidgetItem(str_val))\n else:\n self.two_by_two_table.item(row, col).setText(str_val)\n self.two_by_two_table.blockSignals(False)\n \n if str_val != \"\": #disable item\n self.two_by_two_table.blockSignals(True)\n item = self.two_by_two_table.item(row, col)\n newflags = item.flags() & ~Qt.ItemIsEditable\n item.setFlags(newflags)\n self.two_by_two_table.blockSignals(False)\n except:\n print(\"Got to except in _set_val when trying to set (%d,%d)\" % (row,col)) \n \n def _set_vals(self, 
computed_d):\n '''Sets values in table widget'''\n \n self.two_by_two_table.blockSignals(True)\n self._set_val(0, 0, computed_d[\"c11\"])\n self._set_val(0, 1, computed_d[\"c12\"])\n self._set_val(1, 0, computed_d[\"c21\"])\n self._set_val(1, 1, computed_d[\"c22\"]) \n self._set_val(0, 2, computed_d[\"r1sum\"])\n self._set_val(1, 2, computed_d[\"r2sum\"])\n self._set_val(2, 0, computed_d[\"c1sum\"])\n self._set_val(2, 1, computed_d[\"c2sum\"]) \n self._set_val(2, 2, computed_d[\"total\"]) \n self.two_by_two_table.blockSignals(False)\n \n def _get_prevalence_str(self):\n return str(self.prevalence_txt_box.text())\n\n def cell_changed(self, row, col):\n old_ma_unit, old_table = self._save_ma_unit_and_table_state(\n table = self.two_by_two_table,\n ma_unit = self.ma_unit, \n old_value = self.current_item_data,\n row = row, col = col, use_old_value=True)\n old_prevalence = self._get_prevalence_str()\n \n try:\n # Test if entered data is valid (a number)\n warning_msg = self.cell_data_invalid(self.two_by_two_table.item(row, col).text())\n if warning_msg:\n raise Exception(\"Invalid Cell Data\")\n \n self._update_data_table() # calculate rest of table (provisionally) based on new entry\n warning_msg = self.check_table_consistency.run()\n if warning_msg:\n raise Exception(\"Table no longer consistent.\")\n except Exception as e:\n msg = e.args[0]\n QMessageBox.warning(self.parent(), \"whoops\", msg) #popup warning\n self.restore_ma_unit_and_table(old_ma_unit,old_table, old_prevalence) # brings things back to the way they were\n return # and leave\n \n # if we got here, everything seems ok\n self._update_ma_unit() # 2x2 table --> ma_unit\n self.impute_effects_in_ma_unit() # effects --> ma_unit\n self.set_current_effect() # ma_unit --> effects\n \n new_ma_unit, new_table = self._save_ma_unit_and_table_state(\n table = self.two_by_two_table,\n ma_unit = self.ma_unit, \n row = row, col = col,\n use_old_value = False)\n new_prevalence = self._get_prevalence_str()\n restore_old_f = lambda: self.restore_ma_unit_and_table(old_ma_unit, old_table, old_prevalence)\n restore_new_f = lambda: self.restore_ma_unit_and_table(new_ma_unit, new_table, new_prevalence)\n command = calc_fncs.CommandFieldChanged(restore_new_f=restore_new_f, restore_old_f=restore_old_f, parent=self)\n self.undoStack.push(command)\n \n def restore_ma_unit(self, old_ma_unit):\n ''' Restores the ma_unit data and resets the form'''\n self.ma_unit.__dict__ = copy.deepcopy(old_ma_unit.__dict__)\n print(\"Restored ma_unit data: %s\" % str(self.ma_unit.get_raw_data_for_groups(self.cur_groups)))\n \n self.initialize_form() # clear form first\n self._update_raw_data()\n self.set_current_effect()\n self._update_data_table()\n self.enable_back_calculation_btn()\n \n def restore_table(self, old_table_data):\n nrows = len(old_table_data)\n ncols = len(old_table_data[0])\n \n for row in range(nrows):\n for col in range(ncols):\n self.two_by_two_table.blockSignals(True)\n self._set_val(row, col, old_table_data[row][col])\n self.two_by_two_table.blockSignals(False)\n self.check_table_consistency.run()\n \n def restore_ma_unit_and_table(self, old_ma_unit, old_table, old_prevalence):\n self.restore_ma_unit(old_ma_unit)\n self.restore_table(old_table)\n self.prevalence_txt_box.setText(old_prevalence)\n \n def _save_ma_unit_and_table_state(self, table, ma_unit, row=None, col=None,\n old_value=None, use_old_value=True):\n # Make backup of table info...\n old_table = calc_fncs.save_table_data(table)\n if use_old_value:\n old_table[row][col] = old_value # 
...from BEFORE the cell changed\n \n # Make backup copy of ma_unit\n old_ma_unit = copy.deepcopy(ma_unit)\n return old_ma_unit, old_table\n\n \n def getTotalSubjects(self):\n try:\n return int(self.table_backup[2][2])\n except:\n return None\n\n def print_backup_table(self):\n for row in range(3):\n line = \"\"\n for col in range(3):\n line += self.table_backup[row][col] + \", \"\n print line\n \n def _get_table_vals(self):\n ''' Package table from 2x2 table in to a dictionary'''\n \n vals_d = {}\n vals_d[\"c11\"] = self._get_int(0, 0)\n vals_d[\"c12\"] = self._get_int(0, 1)\n vals_d[\"c21\"] = self._get_int(1, 0)\n vals_d[\"c22\"] = self._get_int(1, 1)\n vals_d[\"r1sum\"] = self._get_int(0, 2)\n vals_d[\"r2sum\"] = self._get_int(1, 2)\n vals_d[\"c1sum\"] = self._get_int(2, 0)\n vals_d[\"c2sum\"] = self._get_int(2, 1)\n vals_d[\"total\"] = self._get_int(2, 2)\n return vals_d\n \n def impute_effects_in_ma_unit(self):\n '''Calculate and store values for effects in ma_unit based on values in 2x2 table'''\n \n # diagnostic data\n counts = self.get_raw_diag_data()\n tp, fn, fp, tn = counts['TP'], counts['FN'], counts['FP'], counts['TN']\n \n # Do what we can if we don't have all the counts\n can_calculate_sens, can_calculate_spec = True, True\n if None in [tp,fn]:\n can_calculate_sens = False\n tp,fn = 0,0 # dummy data\n if None in [tn,fp]:\n can_calculate_spec = False\n tn, fp = 0,0 # dummy data\n \n # sensitivity and specificity\n ests_and_cis = meta_py_r.diagnostic_effects_for_study(\n tp, fn, fp, tn, metrics=DIAGNOSTIC_METRICS,\n conf_level=self.global_conf_level)\n \n # now we're going to set the effect estimate/CI on the MA object.\n for metric in DIAGNOSTIC_METRICS:\n # don't set stuff if it made-up\n if metric.lower()==\"sens\" and not can_calculate_sens:\n continue\n elif metric.lower()==\"spec\" and not can_calculate_spec:\n continue\n \n est, lower, upper = ests_and_cis[metric][\"calc_scale\"]\n self.ma_unit.set_effect_and_ci(metric, self.group_str, est, lower, upper, mult=self.mult)\n\n def _get_row_col(self, field):\n row = 0 if field in (\"FP\", \"TP\") else 1\n col = 1 if field in (\"FP\", \"TN\") else 0\n return (row, col)\n\n def update_2x2_table(self, imputed_dict):\n ''' Fill in entries in 2x2 table and add data to ma_unit'''\n \n print \"Updating 2x2......\"\n \n # reset relevant column and sums column if we have new data\n if imputed_dict[\"TP\"] and imputed_dict[\"FN\"]:\n print(\"TP, FN:\", imputed_dict[\"TP\"],imputed_dict[\"FN\"])\n print \"clearing col 0 and 2\"\n self.clear_column(0)\n self.clear_column(2)\n if imputed_dict[\"TN\"] and imputed_dict[\"FP\"]:\n print \"clearing col 1 and 2\"\n self.clear_column(1)\n self.clear_column(2)\n \n for field in [\"FP\", \"TP\", \"TN\", \"FN\"]:\n if (field in imputed_dict) and (not imputed_dict[field] is None):\n row, col = self._get_row_col(field)\n self._set_val(row, col, imputed_dict[field])\n # here we update the MA unit\n raw_data_index = DIAG_FIELDS_TO_RAW_INDICES[field]\n \n # TODO: ENC\n self.ma_unit.tx_groups[self.group_str].raw_data[raw_data_index] =\\\n None if not is_a_float(imputed_dict[field]) else float(imputed_dict[field])\n \n def _update_ma_unit(self):\n '''Copy data from data table to the MA_unit'''\n \n print \"updating ma unit....\"\n raw_dict = self.get_raw_diag_data() # values are floats or None\n for field in raw_dict.iterkeys():\n i = DIAG_FIELDS_TO_RAW_INDICES[field]\n self.ma_unit.tx_groups[self.group_str].raw_data[i] = raw_dict[field] # TODO: ENC\n \n def 
get_raw_diag_data(self,convert_None_to_NA_string=False):\n '''Returns a dictionary of the raw data in the table (TP,FN,FP,TN), \n None for empty cell'''\n \n NoneValue = \"NA\" if convert_None_to_NA_string else None\n \n d={}\n d[\"TP\"] = float(self._get_int(0,0)) if not self._is_empty(0,0) else NoneValue\n d[\"FN\"] = float(self._get_int(1,0)) if not self._is_empty(1,0) else NoneValue\n d[\"FP\"] = float(self._get_int(0,1)) if not self._is_empty(0,1) else NoneValue\n d[\"TN\"] = float(self._get_int(1,1)) if not self._is_empty(1,1) else NoneValue\n return d\n \n def _text_box_value_is_between_bounds(self, val_str, new_text):\n display_scale_val = \"\"\n \n get_disp_scale_val_if_valid = partial(\n calc_fncs.evaluate, new_text=new_text, ma_unit=self.ma_unit,\n curr_effect=self.cur_effect, group_str=self.group_str,\n conv_to_disp_scale = partial(meta_py_r.diagnostic_convert_scale,\n metric_name=self.cur_effect,\n convert_to=\"display.scale\"),\n parent=self, mult=self.mult)\n \n calc_fncs.block_signals(self.entry_widgets, True)\n try:\n if val_str == \"est\" and not is_empty(new_text):\n display_scale_val = get_disp_scale_val_if_valid(ci_param='est')\n elif val_str == \"lower\" and not is_empty(new_text):\n display_scale_val = get_disp_scale_val_if_valid(ci_param='low')\n elif val_str == \"upper\" and not is_empty(new_text):\n display_scale_val = get_disp_scale_val_if_valid(ci_param='high')\n elif val_str == \"prevalence\" and not is_empty(new_text):\n get_disp_scale_val_if_valid(opt_cmp_fn = lambda x: 0 <= float(x) <= 1,\n opt_cmp_msg=\"Prevalence must be between 0 and 1.\")\n except:\n calc_fncs.block_signals(self.entry_widgets, False)\n return False, False\n calc_fncs.block_signals(self.entry_widgets, False)\n return True, display_scale_val\n\n def _get_txt_from_val_str(self, val_str):\n if val_str == \"est\":\n return str(self.effect_txt_box.text())\n elif val_str == \"lower\":\n return str(self.low_txt_box.text())\n elif val_str == \"upper\":\n return str(self.high_txt_box.text())\n elif val_str == \"prevalence\":\n return str(self.prevalence_txt_box.text())\n return None # should never happen\n \n\n def val_changed(self, val_str):\n # Backup form state\n old_ma_unit, old_table = self._save_ma_unit_and_table_state(\n table = self.two_by_two_table,\n ma_unit = self.ma_unit, \n use_old_value=False)\n old_prevalence = self.current_prevalence\n \n new_text = self._get_txt_from_val_str(val_str)\n \n no_errors, display_scale_val = self._text_box_value_is_between_bounds(val_str, new_text)\n if no_errors is False: # There are errors\n self.restore_ma_unit_and_table(old_ma_unit,old_table, old_prevalence)\n calc_fncs.block_signals(self.entry_widgets, True)\n if val_str == \"est\":\n self.effect_txt_box.setFocus()\n elif val_str == \"lower\":\n self.low_txt_box.setFocus()\n elif val_str == \"upper\":\n self.high_txt_box.setFocus()\n elif val_str == \"prevalence\":\n self.prevalence_txt_box.setFocus()\n calc_fncs.block_signals(self.entry_widgets, False)\n return\n \n # If we got to this point it means everything is ok so far \n try:\n if display_scale_val not in EMPTY_VALS:\n display_scale_val = float(display_scale_val)\n else:\n display_scale_val = None\n except ValueError:\n # a number wasn't entered; ignore\n # should probably clear out the box here, too.\n print \"fail.\"\n return None\n \n\n calc_scale_val = meta_py_r.diagnostic_convert_scale(display_scale_val,\n self.cur_effect, convert_to=\"calc.scale\")\n \n if val_str == \"est\":\n self.ma_unit.set_effect(self.cur_effect, self.group_str, 
calc_scale_val)\n elif val_str == \"lower\":\n self.ma_unit.set_lower(self.cur_effect, self.group_str, calc_scale_val)\n elif val_str == \"upper\":\n self.ma_unit.set_upper(self.cur_effect, self.group_str, calc_scale_val)\n elif val_str == \"prevalence\":\n pass\n\n new_ma_unit, new_table = self._save_ma_unit_and_table_state(\n table = self.two_by_two_table, ma_unit = self.ma_unit,\n use_old_value=False)\n new_prevalence = self._get_prevalence_str()\n restore_old_f = lambda: self.restore_ma_unit_and_table(old_ma_unit, old_table, old_prevalence)\n restore_new_f = lambda: self.restore_ma_unit_and_table(new_ma_unit, new_table, new_prevalence)\n command = calc_fncs.CommandFieldChanged(restore_new_f=restore_new_f, restore_old_f=restore_old_f, parent=self)\n self.undoStack.push(command)\n \n self.current_prevalence = new_prevalence\n\n def effect_changed(self):\n self.cur_effect = str(self.effect_cbo_box.currentText()) \n self.set_current_effect()\n \n self.enable_txt_box_input()\n self.enable_back_calculation_btn()\n \n def _update_raw_data(self):\n ''' populates the 2x2 table with whatever parametric data was provided '''\n self.two_by_two_table.blockSignals(True) \n field_index = 0\n for col in (0,1):\n for row in (0,1):\n val = self.ma_unit.get_raw_data_for_group(self.group_str)[field_index]\n if val is not None:\n try:\n val = str(int(val))\n except:\n val = str(val)\n item = QTableWidgetItem(val)\n self.two_by_two_table.setItem(row, col, item)\n field_index+=1\n self.two_by_two_table.blockSignals(False)\n\n def _populate_effect_cmbo_box(self):\n # for now we only back-calculate from sens/spec\n effects = BACK_CALCULATABLE_DIAGNOSTIC_EFFECTS # TODO add more metrics\n self.effect_cbo_box.blockSignals(True)\n self.effect_cbo_box.addItems(effects)\n self.effect_cbo_box.blockSignals(False)\n self.effect_cbo_box.setCurrentIndex(0)\n \n def set_current_effect(self):\n '''Fill in effect text boxes with data from ma_unit'''\n txt_boxes = dict(effect=self.effect_txt_box, lower=self.low_txt_box, upper=self.high_txt_box)\n calc_fncs.helper_set_current_effect(ma_unit=self.ma_unit,\n txt_boxes=txt_boxes, current_effect=self.cur_effect,\n group_str=self.group_str, data_type=\"diagnostic\", mult=self.mult)\n \n def print_effects_dict_from_ma_unit(self):\n print self.ma_unit.get_effects_dict()\n\n def _update_data_table(self):\n '''Try to calculate rest of 2x2 table from existing cells'''\n \n calc_fncs.block_signals(self.entry_widgets, True)\n \n params = self._get_table_vals()\n computed_params = calc_fncs.compute_2x2_table(params)\n print \"Computed Params\", computed_params\n if computed_params:\n self._set_vals(computed_params) # computed --> table widget\n \n # Compute prevalence if possible\n if (not computed_params['c1sum'] in EMPTY_VALS) and (not computed_params['total'] in EMPTY_VALS):\n prevalence = float(computed_params['c1sum'])/float(computed_params['total'])\n prev_str = str(prevalence)[:7]\n self.prevalence_txt_box.setText(\"%s\" % prev_str)\n self.enable_txt_box_input()\n \n calc_fncs.block_signals(self.entry_widgets, False)\n \n def clear_column(self,col):\n '''Clears out column in table and ma_unit'''\n \n print(\"Clearing column %d\" % col)\n for row in range(3):\n self._set_val(row, col, None) \n \n self._update_ma_unit()\n \n def clear_form(self):\n # For undo/redo\n old_ma_unit, old_table = self._save_ma_unit_and_table_state(\n table = self.two_by_two_table,\n ma_unit = self.ma_unit, \n use_old_value=False)\n old_prevalence = self._get_prevalence_str()\n \n keys = [\"c11\", \"c12\", 
\"r1sum\", \"c21\", \"c22\", \"r2sum\", \"c1sum\", \"c2sum\", \"total\"]\n blank_vals = dict( zip(keys, [\"\"]*len(keys)) )\n\n self._set_vals(blank_vals)\n self._update_ma_unit()\n \n # clear out effects stuff\n for metric in DIAGNOSTIC_METRICS:\n self.ma_unit.set_effect_and_ci(metric, self.group_str, None, None, None, mult=self.mult)\n \n # clear line edits\n self.set_current_effect()\n self.prevalence_txt_box.blockSignals(True)\n self.prevalence_txt_box.setText(\"\")\n self.prevalence_txt_box.blockSignals(False)\n\n calc_fncs.reset_table_item_flags(self.two_by_two_table)\n #self.enable_txt_box_input()\n \n new_ma_unit, new_table = self._save_ma_unit_and_table_state(\n table = self.two_by_two_table, ma_unit = self.ma_unit,\n use_old_value=False)\n new_prevalence = self._get_prevalence_str()\n restore_old_f = lambda: self.restore_ma_unit_and_table(old_ma_unit, old_table, old_prevalence)\n restore_new_f = lambda: self.restore_ma_unit_and_table(new_ma_unit, new_table, new_prevalence)\n command = calc_fncs.CommandFieldChanged(restore_new_f=restore_new_f, restore_old_f=restore_old_f, parent=self)\n self.undoStack.push(command)\n \n def enable_txt_box_input(self):\n ''' Enables text boxes if they are empty, disables them otherwise '''\n \n #meta_globals.enable_txt_box_input(self.effect_txt_box, self.low_txt_box,\n # self.high_txt_box, self.prevalence_txt_box)\n pass\n \n# def set_clear_btn_color(self):\n# if calc_fncs._input_fields_disabled(self.two_by_two_table, self.text_boxes):\n# self.clear_Btn.setPalette(self.pushme_palette)\n# else:\n# self.clear_Btn.setPalette(self.orig_palette)\n \n def enable_back_calculation_btn(self, engage = False):\n # For undo/redo\n old_ma_unit, old_table = self._save_ma_unit_and_table_state(\n table = self.two_by_two_table,\n ma_unit = self.ma_unit, \n use_old_value=False)\n old_prevalence = self._get_prevalence_str()\n \n def build_dict():\n d = {}\n\n for effect in BACK_CALCULATABLE_DIAGNOSTIC_EFFECTS: \n est,lower,upper = self.ma_unit.get_effect_and_ci(effect,\n self.group_str,\n self.mult)\n conv_to_disp_scale = lambda x: meta_py_r.diagnostic_convert_scale(x, effect, convert_to=\"display.scale\")\n d_est,d_lower,d_upper = [conv_to_disp_scale(x) for x in [est,lower,upper]]\n for i,Rsubkey in enumerate([\"\",\".lb\",\".ub\"]):\n try:\n d[\"%s%s\" % (effect.lower(), Rsubkey)] = float([d_est,d_lower,d_upper][i])\n except:\n pass\n \n x = self.getTotalSubjects()\n d[\"total\"] = float(x) if is_a_float(x) else None\n\n x = self.prevalence_txt_box.text()\n d[\"prev\"] = float(x) if is_a_float(x) else None\n\n d[\"conf.level\"] = self.global_conf_level\n \n # now grab the raw data, if available\n d.update(self.get_raw_diag_data())\n \n return d\n \n def new_data(diag_data, imputed):\n new_data = (imputed[\"TP\"],\n imputed[\"FP\"],\n imputed[\"FN\"],\n imputed[\"TN\"])\n old_data = (self._get_int(0,0),\n self._get_int(0,1),\n self._get_int(1,0),\n self._get_int(1,1),\n )\n isBlank = lambda x: x in EMPTY_VALS\n new_item_available = lambda old, new: isBlank(old) and not isBlank(new)\n comparison = [new_item_available(old_data[i], new_data[i]) for i in range(len(new_data))]\n print(\"Comparison:\", comparison)\n if any(comparison):\n changed = True\n else:\n changed = False\n return changed\n \n diag_data = build_dict()\n print(\"Diagnostic Data for back-calculation: \", diag_data)\n\n #if diag_data is not None:\n \n imputed = meta_py_r.impute_diag_data(diag_data)\n print \"imputed data: %s\" % imputed\n \n # Leave if nothing was imputed\n if not (imputed[\"TP\"] or 
imputed[\"TN\"] or imputed[\"FP\"] or imputed[\"FN\"]):\n print(\"Nothing could be imputed\")\n self.back_calc_Btn.setEnabled(False)\n return None\n \n if new_data(diag_data, imputed):\n self.back_calc_Btn.setEnabled(True)\n else:\n self.back_calc_Btn.setEnabled(False)\n #self.set_clear_btn_color()\n \n if not engage:\n return None\n ########################################################################\n # Actually do stuff with imputed data here if we are 'engaged'\n ########################################################################\n self.update_2x2_table(imputed)\n self._update_data_table()\n self._update_ma_unit()\n #self.set_clear_btn_color()\n \n # For undo/redo\n new_ma_unit, new_table = self._save_ma_unit_and_table_state(\n table = self.two_by_two_table, ma_unit = self.ma_unit,\n use_old_value=False)\n new_prevalence = self._get_prevalence_str()\n restore_old_f = lambda: self.restore_ma_unit_and_table(old_ma_unit, old_table, old_prevalence)\n restore_new_f = lambda: self.restore_ma_unit_and_table(new_ma_unit, new_table, new_prevalence)\n command = calc_fncs.CommandFieldChanged(restore_new_f=restore_new_f, restore_old_f=restore_old_f, parent=self)\n self.undoStack.push(command)\n \n ####### Undo framework ############\n def undo(self):\n print(\"undoing....\")\n self.undoStack.undo()\n \n def redo(self):\n print(\"redoing....\")\n self.undoStack.redo()\n #################################" }, { "alpha_fraction": 0.6669793725013733, "alphanum_fraction": 0.698874294757843, "avg_line_length": 37.07143020629883, "blob_id": "35fe55223a2644920729e81fd87eb25b76af8a6c", "content_id": "0ea9104c786aec53d5875f89304dad28aba6b4d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2132, "license_type": "no_license", "max_line_length": 88, "num_lines": 56, "path": "/src/forms/ui_tom_form.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'tom_form.ui'\n#\n# Created: Thu May 16 14:00:16 2013\n# by: PyQt4 UI code generator 4.10\n#\n# WARNING! 
All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_Dialog(object):\n def setupUi(self, Dialog):\n Dialog.setObjectName(_fromUtf8(\"Dialog\"))\n Dialog.resize(350, 495)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(Dialog.sizePolicy().hasHeightForWidth())\n Dialog.setSizePolicy(sizePolicy)\n Dialog.setMinimumSize(QtCore.QSize(350, 495))\n Dialog.setMaximumSize(QtCore.QSize(350, 495))\n self.label = QtGui.QLabel(Dialog)\n self.label.setGeometry(QtCore.QRect(0, 0, 360, 500))\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())\n self.label.setSizePolicy(sizePolicy)\n self.label.setMinimumSize(QtCore.QSize(360, 500))\n self.label.setMaximumSize(QtCore.QSize(360, 500))\n self.label.setText(_fromUtf8(\"\"))\n self.label.setPixmap(QtGui.QPixmap(_fromUtf8(\":/misc/tom.jpg\")))\n self.label.setObjectName(_fromUtf8(\"label\"))\n\n self.retranslateUi(Dialog)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n\n def retranslateUi(self, Dialog):\n Dialog.setWindowTitle(_translate(\"Dialog\", \"Hello, I\\'m Tom!\", None))\n\nimport icons_rc\n" }, { "alpha_fraction": 0.6171342730522156, "alphanum_fraction": 0.6237494945526123, "avg_line_length": 39.75431442260742, "blob_id": "d9542b125c27b30908512948d02765513ac85242", "content_id": "e89a86f8db4b7752415b5157bc352b82b25e561d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 30687, "license_type": "no_license", "max_line_length": 207, "num_lines": 753, "path": "/src/R/openmetar/R/utilities.r", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "####################################\n# #\n# OpenMeta[Analyst] #\n# ---- #\n# utilities.r #\n# #\n# Utilities for pretty-printing #\n# results. #\n####################################\n\n\nprint.summary.display <- function(summary.disp,...) {\n #\n # Prints a summary results\n # summary.disp is a list containing the following named items\n # - model.title - a string that appears at the top of the summary.\n # - table.titles - a vector of titles for the results tables\n # Setting a table title to NA prevents the table from being printed.\n # - arrays - a list of arrays, of the same length as table.titles,\n # which are pretty-printed by print.summary.data \n #\n cat(summary.disp$model.title)\n cat(\"\\n\\n\")\n arrays <- summary.disp$arrays\n count = 1\n for (name in arrays) {\n if (!is.na(summary.disp$table.titles[count])) {\n cat(summary.disp$table.titles[count])\n cat(\"\\n\")\n print.summary.data(name)\n cat(\"\\n\")\n }\n count = count + 1\n }\n}\n\nprint.summary.data <- function(table.data,...) 
{\n # Prints an array table.data.\n num.rows <- length(table.data[,1])\n num.cols <- length(table.data[1,])\n # Compute column widths\n extra.col.spaces <- 2\n #table.line <- \" \"\n # This was for the old table lines.\n col.widths <- NULL\n for (col.index in 1:num.cols) {\n width <- max(nchar(table.data[,col.index])) + extra.col.spaces\n col.widths <- c(col.widths, max(nchar(table.data[,col.index])) + extra.col.spaces)\n #spaces <- rep(\" \", width)\n #dash.line <- create.repeat.string(\"-\", width)\n #table.line <- paste(table.line, spaces, \" \", sep=\"\")\n }\n table.width <- sum(col.widths) + num.cols + 1\n # Build table\n #cat(table.line)\n cat(\"\\n\")\n \n for (row.index in 1:num.rows) {\n study.name <- table.data[row.index, 1]\n # Study names are aligned left\n end.num <- col.widths[1] - nchar(study.name) -1\n table.row <- pad.with.spaces(study.name, 1, end.num)\n if (num.cols > 1) {\n for (col.index in 2:num.cols) {\n # Data is aligned right\n col.width <- col.widths[col.index]\n entry <- table.data[row.index,col.index]\n # pad entries with spaces to align columns.\n end.num <- ceiling((col.width - nchar(entry))/2)\n pos.num.check <- ((row.index>1) & (regexpr(\"-\", entry)!=1) & (regexpr(\"<\", entry)!=1))\n if (!(is.na(pos.num.check)) && pos.num.check) {\n # entry is a positive number so add extra space to align decimal sign.\n entry <- paste(\" \", entry, sep=\"\")\n } \n begin.num <- floor((col.width - nchar(entry))/2)\n end.num <- col.width - begin.num - nchar(entry)\n padded.entry <- pad.with.spaces(entry, begin.num, end.num)\n table.row <- paste(table.row, padded.entry, \" \", sep=\"\")\n }\n }\n cat(table.row)\n cat(\"\\n\")\n #cat(table.line)\n cat(\"\\n\")\n }\n}\n\npad.with.spaces <- function(entry, begin.num, end.num) {\n # Adds spaces to beginning and end of entry\n repeat.string.begin <- \"\"\n if (begin.num > 0) {\n repeat.string.begin <- create.repeat.string(\" \", begin.num)\n }\n repeat.string.end <- \"\"\n if (end.num > 0) {\n repeat.string.end <- create.repeat.string(\" \", end.num)\n }\n padded.entry <- paste(repeat.string.begin, entry, repeat.string.end, sep=\"\")\n padded.entry\n}\n\ncreate.repeat.string <- function(symbol, num.repeats) {\n # creates a string in which symbol is repeated num.repeats times\n repeat.string <- NULL\n for (count in 1:num.repeats) {\n repeat.string <- paste(repeat.string, symbol, sep=\"\")\n }\n repeat.string\n}\n \nround.display <- function(x, digits) {\n digits.str <- paste(\"%.\", digits, \"f\", sep=\"\")\n x.disp <- c()\n x.disp[x < 10^(-digits)] <- paste(\"< \", 10^(-digits), sep=\"\")\n x.disp[x >= 10^(-digits)] <- sprintf(digits.str, x[x>=10^(-digits)])\n x.disp\n}\n\ng.round.display.zval <- function(x, digits) {\n # just for use in # create.subgroup.display for rounding the (single) zvals\n digits.str <- paste(\"%.\", digits, \"f\", sep=\"\")\n x.disp <- c()\n \n x.disp[x < 0 && abs(x) < 10^(-digits)] <- paste(\">\",\"-\",10^(-digits),\" & <0\",sep=\"\")\n x.disp[x < 0 && abs(x) >= 10^(-digits)] <- sprintf(digits.str, x[x < 0 && abs(x)>=10^(-digits)])\n \n x.disp[x>0 && x < 10^(-digits)] <- paste(\"< \", 10^(-digits), sep=\"\")\n x.disp[x>0 && x >= 10^(-digits)] <- sprintf(digits.str, x[x>0 && x>=10^(-digits)])\n x.disp\n}\n \n\ncreate.summary.disp <- function(om.data, params, res, model.title) {\n # create tables for diplaying summary of ma results\n digits.str <- paste(\"%.\", params$digits, \"f\", sep=\"\")\n transform.name <- get.transform.name(om.data)\n scale.str <- get.scale(params)\n tau2 <- sprintf(digits.str, 
res$tau2)\n degf <- res$k - 1\n if (!is.null(res$I2)) {\n I2 <- round(res$I2, digits=params$digits)\n }\n QLabel = paste(\"Q(df=\", degf, \")\", sep=\"\")\n # Set n, the vector of numbers of studies, for PFT metric.\n if (params$measure==\"PFT\" && length(om.data@g1O1) > 0 && length(om.data@g1O2) > 0) {\n n <- om.data@g1O1 + om.data@g1O2 # Number of subjects - needed for Freeman-Tukey double arcsine trans.\n }\n else {\n n <- NULL # don't need n except for PFT (freeman-tukey)\n }\n if (!is.null(res$QE)) {\n QE <- sprintf(digits.str, res$QE)\n } else {\n QE <- \"NA\"\n }\n if (!is.null(res$QEp)) {\n QEp <- round.display(x=res$QEp, digits=params$digits)\n } else {\n QEp <- \"NA\"\n }\n if (!is.null(res$pval)) {\n pVal <- round.display(res$pval, digits=params$digits)\n } else {\n pVal <- \"NA\"\n }\n\n res.title <- \" Model Results\"\n #y.disp <- sprintf(digits.str, eval(call(transform.name, params$measure))$display.scale(res$b, list(ni=n)))\n #lb.disp <- sprintf(digits.str, eval(call(transform.name, params$measure))$display.scale(res$ci.lb, list(ni=n)))\n #ub.disp <- sprintf(digits.str, eval(call(transform.name, params$measure))$display.scale(res$ci.ub, list(ni=n)))\n y.disp <- sprintf(digits.str, eval(call(transform.name, params$measure))$display.scale(res$b, ni=n))\n lb.disp <- sprintf(digits.str, eval(call(transform.name, params$measure))$display.scale(res$ci.lb, ni=n))\n ub.disp <- sprintf(digits.str, eval(call(transform.name, params$measure))$display.scale(res$ci.ub, ni=n))\n se <- sprintf(digits.str, res$se)\n\n if (res$method==\"FE\") {\n het.col.labels <- c(QLabel, \"Het. p-Value\")\n het.col.vals <- c(QE, QEp)\n het.array <- rbind(het.col.labels, het.col.vals)\n } else { \n het.col.labels <- c(\"tau^2\", QLabel, \"Het. p-Value\", \"I^2\")\n het.col.vals <- c(tau2, QE, QEp, I2)\n het.array <- rbind(het.col.labels, het.col.vals)\n }\n class(het.array) <- \"summary.data\"\n het.title <- \" Heterogeneity\"\n \n if (scale.str == \"log\" || scale.str == \"logit\" || scale.str == \"arcsine\") {\n # display and calculation scales are different - create two tables for results\n res.col.labels <- c(\"Estimate\", \"Lower bound\", \"Upper bound\",\"p-Value\")\n res.col.vals <- c(y.disp, lb.disp, ub.disp, pVal)\n res.array <- rbind(res.col.labels, res.col.vals)\n estCalc <- sprintf(digits.str, res$b)\n lbCalc <- sprintf(digits.str, res$ci.lb)\n ubCalc <- sprintf(digits.str, res$ci.ub)\n alt.col.labels <- c(\"Estimate\", \"Lower bound\", \"Upper bound\", \"Std. error\")\n alt.col.vals <- c(estCalc, lbCalc, ubCalc, se)\n alt.array <- rbind(alt.col.labels, alt.col.vals)\n alt.title <- paste(\" Results (\", scale.str, \" scale)\", sep=\"\")\n arrays <- list(arr1=res.array, arr2=het.array, arr3=alt.array)\n table.titles <- c(res.title, het.title, alt.title)\n } else {\n # display and calculation scales are the same - create one table for results\n col.labels <- c(\"Estimate\", \"Lower bound\", \"Upper bound\", \"Std. 
error\", \"p-Value\")\n col.vals <- c(y.disp, lb.disp, ub.disp, se, pVal)\n res.array <- rbind(col.labels, col.vals)\n arrays = list(arr1=res.array, arr2=het.array)\n table.titles <- c(res.title, het.title)\n }\n \n #if (transform.name == \"binary.transform.f\") {\n # Add raw data title and array \n # raw.data.array <- create.binary.data.array(om.data, params, res)\n # table.titles <- c(\" Study Data\", table.titles)\n # raw.data.list <- list(\"arr0\"=raw.data.array)\n # arrays <- c(raw.data.list, arrays)\n #} else if (transform.name == \"continuous.transform.f\") {\n #raw.data.array <- create.cont.data.array(om.data, params, res)\n #table.titles <- c(\" Study Data\", table.titles)\n #raw.data.list <- list(\"arr0\"=raw.data.array)\n #arrays <- c(raw.data.list, arrays)\n #}\n # Above code can be re-enabled when write.x.study.data.to.file is fixed.\n \n summary.disp <- list(\n \"model.title\" = model.title,\n \"table.titles\" = table.titles,\n \"arrays\" = arrays,\n \"MAResults\" = res)\n class(summary.disp) <- \"summary.display\"\n summary.disp\n}\n\n# @TODO should merge this with save.data below\nsave.plot.data <- function(plot.data, out.path=NULL) {\n # saves plot data to the r_tmp directory\n if (is.null(out.path)){\n # by default, we use thecurrent system time as a 'unique enough' filename\n out.path <- paste(\"r_tmp/\", \n as.character(as.numeric(Sys.time())), sep=\"\")\n }\n ### save plot data *only*\n save(plot.data, file=paste(out.path, \".plotdata\", sep=\"\"))\n out.path\n}\n\n# For OpenMEE phylogenetic forest plot\nsave.plot.data.and.params <- function(data, params, res, level, out.path=NULL) {\n # saves plot data to the r_tmp directory\n if (is.null(out.path)){\n # by default, we use thecurrent system time as a 'unique enough' filename\n out.path <- paste(\"r_tmp/\", \n as.character(as.numeric(Sys.time())), sep=\"\")\n }\n \n ### save plot data\n save(data, file=paste(out.path, \".data\", sep=\"\"))\n \n ### save params\n save(params, file=paste(out.path, \".params\", sep=\"\"))\n \n ### save res\n save(res, file=paste(out.path, \".res\", sep=\"\"))\n \n ### save level\n save(level, file=paste(out.path, \".level\", sep=\"\"))\n \n out.path\n}\n\n\nsave.data <- function(om.data, res, params, plot.data, out.path=NULL) {\n # this saves *all* the data for certain types of plots, in contrast\n # to the above method (save.plot.data), which saves only the plot.data\n # object.\n #\n # save the data, result and plot parameters to a tmp file on disk\n if (is.null(out.path)){\n # by default, we use thecurrent system time as a 'unique enough' filename\n out.path <- paste(\"r_tmp/\", \n as.character(as.numeric(Sys.time())), sep=\"\")\n }\n\n save(om.data, file=paste(out.path, \".data\", sep=\"\"))\n save(res, file=paste(out.path, \".res\", sep=\"\"))\n \n save(plot.data, file=paste(out.path, \".plotdata\", sep=\"\"))\n save(params, file=paste(out.path, \".params\", sep=\"\"))\n out.path\n}\n\ncreate.regression.display <- function(res, params, display.data) {\n \n if (is.null(params$bootstrap.type))\n bootstrap.type <- \"\"\n else\n bootstrap.type <- as.character(params$bootstrap.type) # will be null if not bootstrap\n \n \n # create table for diplaying summary of regression ma results\n cov.display.col <- display.data$cov.display.col\n levels.display.col <- display.data$levels.display.col\n studies.display.col <- display.data$studies.display.col\n # first two columns of table\n factor.n.levels <- display.data$factor.n.levels\n n.cont.covs <- display.data$n.cont.covs\n n.cont.rows <- 
n.cont.covs + 1 # extra row for intercept\n n.factor.covs <- length(factor.n.levels)\n n.rows <- length(cov.display.col) + 1\n # extra row for col. labels\n if (n.factor.covs==0) {\n col.labels <- switch(bootstrap.type,\n boot.meta.reg=c(\"Covariate\", \"Coefficients\", \"Lower bound\", \"Upper bound\"),\n c(\"Covariate\", \"Coefficients\", \"Lower bound\", \"Upper bound\", \"Std. error\", \"p-Value\"))\n #col.labels <- c(\"Covariate\", \"Coefficients\", \"Lower bound\", \"Upper bound\", \"Std. error\", \"p-Value\")\n } else {\n col.labels <- switch(bootstrap.type,\n boot.meta.reg=col.labels <- c(\"Covariate\", \"Level\", \"Studies\", \"Coefficients\", \"Lower bound\", \"Upper bound\"),\n col.labels <- c(\"Covariate\", \"Level\", \"Studies\", \"Coefficients\", \"Lower bound\", \"Upper bound\", \"Std. error\", \"p-Value\"))\n #col.labels <- c(\"Covariate\", \"Level\", \"Studies\", \"Coefficients\", \"Lower bound\", \"Upper bound\", \"Std. error\", \"p-Value\")\n }\n \n reg.array <- array(dim=c(length(cov.display.col)+1, length(col.labels)), dimnames=list(NULL, col.labels))\n reg.array[1,] <- col.labels\n digits.str <- paste(\"%.\", params$digits, \"f\", sep=\"\")\n coeffs <- sprintf(digits.str, res$b)#; print(paste(c(\"coeffs:\", coeffs))); ###\n if (bootstrap.type!=\"boot.meta.reg\") {\n se <- round.display(res$se, digits=params$digits)\n pvals <- round.display(res$pval, digits=params$digits)\n }\n lbs <- sprintf(digits.str, res$ci.lb)\n ubs <- sprintf(digits.str, res$ci.ub)\n \n coeffs.tmp <- coeffs[1:n.cont.rows]\n # extra row for intercept\n if (bootstrap.type!=\"boot.meta.reg\") {\n se.tmp <- se[1:n.cont.rows]\n pvals.tmp <- pvals[1:n.cont.rows]\n }\n lbs.tmp <- lbs[1:n.cont.rows]\n ubs.tmp <- ubs[1:n.cont.rows]\n if (n.factor.covs > 0) {\n # there are factor covariants - insert spaces for reference var. row.\n insert.row <- n.cont.rows + 1\n print(paste(c(\"insert.row outer: \", insert.row)))\n for (count in 1:n.factor.covs) {\n n.levels <- factor.n.levels[count]\n #print(paste(c(\"n.levels\", n.levels))) #####\n coeffs.tmp <- c(coeffs.tmp,\"\", coeffs[insert.row:(insert.row + n.levels - 2)])\n if (bootstrap.type!=\"boot.meta.reg\") {\n se.tmp <- c(se.tmp,\"\", se[insert.row:(insert.row + n.levels - 2)])\n pvals.tmp <- c(pvals.tmp,\"\",pvals[insert.row:(insert.row + n.levels - 2)])\n }\n lbs.tmp <- c(lbs.tmp,\"\",lbs[insert.row:(insert.row + n.levels - 2)])\n ubs.tmp <- c(ubs.tmp,\"\",ubs[insert.row:(insert.row + n.levels - 2)])\n insert.row <- insert.row + n.levels - 1\n #print(paste(c(\"insert.row after: \", insert.row))) ######\n } \n reg.array[2:n.rows, \"Level\"] <- levels.display.col\n reg.array[2:n.rows, \"Studies\"] <- studies.display.col\n }\n\n \n \n # add data to array\n reg.array[2:n.rows,\"Covariate\"] <- cov.display.col\n reg.array[2:n.rows,\"Coefficients\"] <- coeffs.tmp\n reg.array[2:n.rows, \"Lower bound\"] <- lbs.tmp\n reg.array[2:n.rows, \"Upper bound\"] <- ubs.tmp\n if (bootstrap.type!=\"boot.meta.reg\") {\n reg.array[2:n.rows,\"Std. 
error\"] <- se.tmp\n reg.array[2:n.rows, \"p-Value\"] <- pvals.tmp\n \n omnibus.pval.array <- array(dim=c(1,1))\n omnibus.pval.array[1,1] <- sprintf(digits.str, res$QMp)\n arrays <- list(arr1=reg.array, arr2=omnibus.pval.array)\n } else {\n arrays <- list(arr1=reg.array)\n }\n \n metric.name <- pretty.metric.name(as.character(params$measure)) \n \n if (bootstrap.type!=\"boot.meta.reg\") {\n model.title <- paste(\"Meta-Regression\\n\\nMetric: \", metric.name, sep=\"\")\n reg.disp <- list(\"model.title\" = model.title, \"table.titles\" = c(\"Model Results\", \"Omnibus p-Value\"), \"arrays\" = arrays, \"MAResults\" = res)\n } else {\n model.title <- paste(\"Bootstrapped Meta-Regression based on \", params$num.bootstrap.replicates, \" replicates.\\n\\n\", params$extra.attempts, \" resampling attempts failed.\\n\\nMetric: \", metric.name, sep=\"\")\n reg.disp <- list(\"model.title\" = model.title, \"table.titles\" = c(\"Model Results\"), \"arrays\" = arrays, \"MAResults\" = res)\n }\n \n\n class(reg.disp) <- \"summary.display\"\n return(reg.disp)\n}\n\ncreate.overall.display <- function(res, study.names, params, model.title, data.type) {\n # create tables for diplaying summary of meta-methods (cumulative and leave-one-out) results.\n if (data.type == \"continuous\") {\n transform.name <- \"continuous.transform.f\"\n } else if (data.type == \"diagnostic\") {\n transform.name <- \"diagnostic.transform.f\"\n } else { \n transform.name <- \"binary.transform.f\"\n }\n scale.str <- get.scale(params)\n overall.array <- array(dim=c(length(study.names) + 1, 6))\n #QLabel = paste(\"Q(df = \", degf, \")\", sep=\"\")\n \n overall.array[1,] <- c(\"Studies\", \"Estimate\", \"Lower bound\", \"Upper bound\", \"Std. error\", \"p-Val\")\n \n # unpack the data\n for (count in 1:length(res)) {\n y <- res[[count]]$b\n lb <- res[[count]]$ci.lb\n ub <- res[[count]]$ci.ub\n se <- res[[count]]$se\n digits.str <- paste(\"%.\", params$digits, \"f\", sep=\"\")\n y.disp <- sprintf(digits.str, eval(call(transform.name, params$measure))$display.scale(y, n=NULL))\n lb.disp <- sprintf(digits.str, eval(call(transform.name, params$measure))$display.scale(lb, n=NULL))\n ub.disp <- sprintf(digits.str, eval(call(transform.name, params$measure))$display.scale(ub, n=NULL))\n se.disp <- sprintf(digits.str, se)\n \n if (!is.null(res[[count]]$pval)) {\n pVal <- round.display(res[[count]]$pval, digits=params$digits)\n } else {\n pVal <- \"NA\"\n }\n overall.array[count+1,] <- c(study.names[count], y.disp, lb.disp, ub.disp, se.disp, pVal)\n }\n\n table.titles <- c(\" Model Results\")\n arrays <- list(arr1=overall.array)\n overall.disp <- list(\"model.title\" = model.title, \"table.titles\" = table.titles, \"arrays\" = arrays,\n \"MAResults\" = res )\n class(overall.disp) <- \"summary.display\"\n overall.disp\n}\n\ncreate.subgroup.display <- function(res, study.names, params, model.title, data.type) {\n # create table for diplaying summary of overall ma results\n if (data.type == \"continuous\") {\n transform.name <- \"continuous.transform.f\"\n } else if (data.type == \"diagnostic\") {\n transform.name <- \"diagnostic.transform.f\"\n } else { \n transform.name <- \"binary.transform.f\"\n }\n scale.str <- \"standard\"\n if (metric.is.log.scale(params$measure)){\n scale.str <- \"log\" \n } else if (metric.is.logit.scale(params$measure)) {\n scale.str <- \"logit\"\n }\n subgroup.array <- array(dim=c(length(study.names) + 1, 8))\n het.array <- array(dim=c(length(study.names) + 1, 4))\n #QLabel = paste(\"Q(df = \", degf, \")\", sep=\"\")\n\n # 
hmm....\n n <- length(study.names)\n\n subgroup.array[1,] <- c(\"Subgroups\", \"Studies\", \"Estimate\", \"Lower bound\", \"Upper bound\", \"Std. error\", \"p-Val\", \"z-Val\")\n het.array[1,] <- c(\"Studies\", \"Q (df)\",\n \"Het. p-Val\", \"I^2\")\n # unpack the data\n for (count in 1:length(study.names)) {\n num.studies <- res[[count]]$k\n y <- res[[count]]$b\n lb <- res[[count]]$ci.lb\n ub <- res[[count]]$ci.ub\n se <- res[[count]]$se\n digits.str <- paste(\"%.\", params$digits, \"f\", sep=\"\")\n y.disp <- sprintf(digits.str, eval(call(transform.name, params$measure))$display.scale(y, n))\n lb.disp <- sprintf(digits.str, eval(call(transform.name, params$measure))$display.scale(lb, n))\n ub.disp <- sprintf(digits.str, eval(call(transform.name, params$measure))$display.scale(ub, n))\n se.disp <- sprintf(digits.str, se)\n if (!is.null(res[[count]]$QE)) {\n degf <- res[[count]]$k - 1\n QE <- sprintf(digits.str, res[[count]]$QE)\n QE <- paste(QE, \" (\", degf,\")\", sep=\"\")\n } else {\n QE <- \"NA\"\n }\n if (!is.null(res[[count]]$I2)) {\n I2 <- paste(round(res[[count]]$I2, digits = 2), \"%\")\n } else {\n I2 <- \"NA\"\n }\n if (!is.null(res[[count]]$QEp)) {\n QEp <- round.display(x=res[[count]]$QEp, digits=params$digits)\n } else {\n QEp <- \"NA\"\n }\n if (!is.null(res[[count]]$pval)) {\n pVal <- round.display(res[[count]]$pval, digits=params$digits)\n } else {\n pVal <- \"NA\"\n }\n if (!is.null(res[[count]]$zval)) {\n zVal <- g.round.display.zval(res[[count]]$zval, digits=params$digits)\n } else {\n zVal <- \"NA\"\n }\n\n # very hacky fix to issue where the function would die below. For some\n # reason when there is only a single study, the num.studies is NULL instead\n # of one. This isn't really a bug with this function but rather should be\n # solved elsewhere.....\n if (is.null(num.studies))\n num.studies <- 1\n \n subgroup.array[count+1,] <- c(study.names[count], num.studies, y.disp, lb.disp, ub.disp, se.disp, pVal, zVal)\n het.array[count+1,] <- c(study.names[count], QE, QEp, I2)\n }\n\n table.titles <- c(\" Model Results\", \" Heterogeneity\")\n arrays <- list(arr1=subgroup.array, arr2=het.array)\n #}\n subgroup.disp <- list(\"model.title\" = model.title, \"table.titles\" = table.titles, \"arrays\" = arrays,\n \"MAResults\" = res )\n class(subgroup.disp) <- \"summary.display\"\n subgroup.disp\n}\n\n\nresults.short.list <- function(res) {\n # extracts res$b, res$ci.lb, and res$ci.ub from res\n res.short <- list(\"b\"=res$b[1], \"ci.lb\"=res$ci.lb, \"ci.ub\"=res$ci.ub)\n}\n\ncalc.ci.bounds <- function(om.data, params, ...) 
{\n # Calulate confidence interval bounds using normal approximation.\n y <- om.data@y\n se <- om.data@SE\n alpha <- 1.0-(params$conf.level/100.0)\n mult <- abs(qnorm(alpha/2.0))\n lb <- y - mult*om.data@SE\n ub <- y + mult*om.data@SE\n extra.args <- list(...)\n # Check that bounds are in the range of the transformation and truncate if necessary.\n if (params$measure==\"PR\") {\n for (i in 1:length(lb)) { \n lb[i] <- max(lb[i], 0)\n ub[i] <- min(ub[i], 1)\n }\n }\n if (params$measure==\"PAS\") {\n for (i in 1:length(lb)) { \n lb[i] <- max(lb[i], asin(0))\n ub[i] <- min(ub[i], asin(1))\n }\n }\n if (params$measure==\"PFT\") {\n n <- extra.args[['ni']]\n for (i in 1:length(lb)) { \n lb[i] <- max(lb[i], transf.pft(0, n[i]))\n ub[i] <- min(ub[i], transf.pft(1, n[i]))\n }\n } \n\n study.ci.bounds <- list(lb=lb, ub=ub)\n}\n\nwrite.results.to.file <- function(om.data, params, res, outpath) {\n # write results to file\n transform.name <- get.transform.name(om.data) \n results.df <- data.frame(\"Summary.estimate\" = eval(call(transform.name, params$measure))$display.scale(res$b, n),\n \"Lower.bound\" = eval(call(transform.name, params$measure))$display.scale(res$ci.lb, n),\n \"Upper.bound\" = eval(call(transform.name, params$measure))$display.scale(res$ci.ub, n),\n \"p-Value\" = res$pval)\n write.csv(results.df, file=outpath, row.names=FALSE)\n}\n\nget.transform.name <- function(om.data) { \n # Get transform name for converting between display and calculation scales \n if (\"ContinuousData\" %in% class(om.data)) {\n transform.name <-\"continuous.transform.f\"\n data.type <- \"continuous\"\n } else if (\"DiagnosticData\" %in% class(om.data)) {\n transform.name <- \"diagnostic.transform.f\"\n data.type <- \"diagnostic\"\n } else if (\"BinaryData\" %in% class(om.data)) {\n transform.name <- \"binary.transform.f\"\n data.type <- \"binary\"\n }\n transform.name\n}\n\nget.scale <- function(params) {\n # Get the transformation scale\n if (metric.is.log.scale(params$measure)){\n scale <- \"log\" \n } else if (metric.is.logit.scale(params$measure)) {\n scale <- \"logit\"\n } else if (metric.is.arcsine.scale(params$measure)) {\n scale <- \"arcsine\"\n } else {\n scale <- \"standard\"\n }\n scale\n}\n\nmetric.is.log.scale <- function(metric){\n metric %in% c(binary.log.metrics, diagnostic.log.metrics) \n}\n\nmetric.is.logit.scale <- function(metric) {\n metric %in% c(binary.logit.metrics, diagnostic.logit.metrics)\n} \n\nmetric.is.arcsine.scale <- function(metric) {\n metric %in% c(binary.arcsine.metrics)\n}\n\nmetric.is.freeman_tukey.scale <- function(metric) {\n metric %in% c(binary.freeman_tukey.metrics)\n}\n\nlogit <- function(x) {\n log(x/(1-x))\n}\n\ninvlogit <- function(x) {\n exp(x) / (1 + exp(x))\n}\n\narcsine.sqrt <- function(x) {\n asin(sqrt(x))\n}\n\ninvarcsine.sqrt <- function(x) {\n (sin(x))^2\n}\n\nfreeman_tukey <- function(x,n) {\n if (length(x)==1) {\n hm <- 1/mean(1/n)\n y <- transf.pft(xi=x, ni=hm)\n } else {\n y <- transf.pft(xi=x, ni=n)\n }\n y\n}\n\ninvfreeman_tukey <- function(x, n) {\n # n is either a \n if (length(x)==1) {\n y <- transf.ipft.hm(xi=x, targs=list(ni=n))\n } else {\n y <- transf.ipft(x, n)\n }\n \n y\n # See \"The Inverse of the Freeman-Tukey Double Arcsine Transformations,\"\n # The American Statistician, Nov. 1978, Vol. 32, No. 
4.\n \n #p <- 0.5 * (1 - sign(cos(2*x)) * (1 - (sin(2*x) + (sin(2*x) - 1/sin(2*x)) / n)^2)^0.5)\n}\n\n\nrma.uni.value.info <- function() {\n list(\n b = list(type=\"vector\", description='estimated coefficients of the model.'),\n se = list(type=\"vector\", description='standard errors of the coefficients.'),\n zval = list(type=\"vector\", description='test statistics of the coefficients.'),\n pval = list(type=\"vector\", description='p-values for the test statistics.'),\n ci.lb = list(type=\"vector\", description='lower bound of the confidence intervals for the coefficients.'),\n ci.ub = list(type=\"vector\", description='upper bound of the confidence intervals for the coefficients.'),\n vb = list(type=\"vector\", description='variance-covariance matrix of the estimated coefficients.'),\n tau2 = list(type=\"vector\", description='estimated amount of (residual) heterogeneity. Always 0 when method=\"FE\".'),\n se.tau2 = list(type=\"vector\", description='estimated standard error of the estimated amount of (residual) heterogeneity.'),\n k = list(type=\"vector\", description='number of outcomes included in the model fitting.'),\n p = list(type=\"vector\", description='number of coefficients in the model (including the intercept).'),\n m = list(type=\"vector\", description='number of coefficients included in the omnibus test of coefficients.'),\n QE = list(type=\"vector\", description='test statistic for the test of (residual) heterogeneity.'),\n QEp = list(type=\"vector\", description='p-value for the test of (residual) heterogeneity.'),\n QM = list(type=\"vector\", description='test statistic for the omnibus test of coefficients.'),\n QMp = list(type=\"vector\", description='p-value for the omnibus test of coefficients.'),\n I2 = list(type=\"vector\", description='value of I2. See print.rma.uni for more details.'),\n H2 = list(type=\"vector\", description='value of H2. See print.rma.uni for more details.'),\n R2 = list(type=\"vector\", description='value of R2. See print.rma.uni for more details.'),\n int.only = list(type=\"vector\", description='logical that indicates whether the model is an intercept-only model.'),\n yi = list(type=\"vector\", description='the vector of outcomes'),\n vi = list(type=\"vector\", description='the corresponding sample variances'),\n X = list(type=\"matrix\", description='the model matrix of the model'),\n fit.stats= list(type=\"data.frame\", description='a list with the log-likelihood, deviance, AIC, BIC, and AICc values under the unrestricted and restricted likelihood.'),\n\n # not part of rma.uni output\n weights = list(type=\"vector\", description=\"weights in % given to the observed effects\")\n )\n}\n\ncumul.rma.uni.value.info <- function() {\n list(\n estimate = list(type=\"vector\", description='estimated coefficients of the model.'),\n se = list(type=\"vector\", description='standard errors of the coefficients. 
NA if transf is used to transform the coefficients.'),\n zval = list(type=\"vector\", description='test statistics of the coefficients.'),\n pval = list(type=\"vector\", description='p-values for the test statistics.'),\n ci.lb = list(type=\"vector\", description='lower bounds of the confidence intervals for the coefficients.'), \n ci.ub = list(type=\"vector\", description='upper bounds of the confidence intervals for the coefficients.'),\n QE = list(type=\"vector\", description='test statistics for the tests of heterogeneity.'),\n QEp = list(type=\"vector\", description='p-values for the tests of heterogeneity.'),\n tau2 = list(type=\"vector\", description='estimated amounts of (residual) heterogeneity (only for random-effects models).'),\n I2 = list(type=\"vector\", description='values of I2 .'),\n H2 = list(type=\"vector\", description='values of H2 .') \n )\n}\n\ncumul.rma.mh.value.info <- function () {\n list(\n estimate = list(type=\"vector\", description='estimated coefficients of the model.'),\n se = list(type=\"vector\", description='standard errors of the coefficients. NA if transf is used to transform the coefficients.'),\n zval = list(type=\"vector\", description='test statistics of the coefficients.'),\n pval = list(type=\"vector\", description='p-values for the test statistics.'),\n ci.lb = list(type=\"vector\", description='lower bounds of the confidence intervals for the coefficients.'), \n ci.ub = list(type=\"vector\", description='upper bounds of the confidence intervals for the coefficients.'),\n QE = list(type=\"vector\", description='test statistics for the tests of heterogeneity.'),\n QEp = list(type=\"vector\", description='p-values for the tests of heterogeneity.')\n )\n}\n\nloo.rma.uni.value.info <- function () {\n list(\n estimate = list(type=\"vector\", description='estimated coefficients of the model.'),\n se = list(type=\"vector\", description='standard errors of the coefficients. NA if transf is used to transform the coefficients.'),\n zval = list(type=\"vector\", description='test statistics of the coefficients.'),\n pval = list(type=\"vector\", description='p-values for the test statistics.'),\n ci.lb = list(type=\"vector\", description='lower bounds of the confidence intervals for the coefficients.'), \n ci.ub = list(type=\"vector\", description='upper bounds of the confidence intervals for the coefficients.'),\n Q = list(type=\"vector\", description='test statistics for the tests of heterogeneity.'),\n Qp = list(type=\"vector\", description='p-values for the tests of heterogeneity.'),\n tau2 = list(type=\"vector\", description='estimated amounts of (residual) heterogeneity (only for random-effects models).'),\n I2 = list(type=\"vector\", description='values of I2 .'),\n H2 = list(type=\"vector\", description='values of H2 .') \n )\n}\n\nloo.rma.mh.value.info <- function () {\n list(\n estimate = list(type=\"vector\", description='estimated coefficients of the model.'),\n se = list(type=\"vector\", description='standard errors of the coefficients. 
NA if transf is used to transform the coefficients.'),\n zval = list(type=\"vector\", description='test statistics of the coefficients.'),\n pval = list(type=\"vector\", description='p-values for the test statistics.'),\n ci.lb = list(type=\"vector\", description='lower bounds of the confidence intervals for the coefficients.'), \n ci.ub = list(type=\"vector\", description='upper bounds of the confidence intervals for the coefficients.'),\n Q = list(type=\"vector\", description='test statistics for the tests of heterogeneity.'),\n Qp = list(type=\"vector\", description='p-values for the tests of heterogeneity.')\n )\n}\n\ncapture.output.and.collapse <- function (x) {\n output <- paste(capture.output(x), collapse=\"\\n\")\n output\n}" }, { "alpha_fraction": 0.5266836881637573, "alphanum_fraction": 0.591304361820221, "avg_line_length": 34.98159408569336, "blob_id": "406a2a535ccca8b206302fddd1094951726ee74c", "content_id": "602d651879835a3221661a5dbfab31fd3f6241be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 5865, "license_type": "no_license", "max_line_length": 175, "num_lines": 163, "path": "/src/R/openmetar_unit_tests/openma.tests.r", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "\nlibrary(openmetar)\n\ntest.all <- function() {\n try.errors <- array(list())\n \n params <- set.params(data.type=\"binary\")\n binary.data <- create.binary.data(params)\n try.errors <- test.binary.functions(binary.data, params, try.errors)\n \n cont.data <- create.cont.data(params)\n try.errors <- test.cont.functions(cont.data, params, try.errors)\n \n params <- set.params(data.type=\"diagnostic\")\n diagnostic.data <- create.diag.data(params)\n try.errors <- test.diag.functions(diagnostic.data, params, try.errors)\n try.errors\n} \n\ncreate.binary.data <- function(params) {\n ai<- c(4,6,3,62,33,180,8,505)\n n1i<-c(123,306,231,13598,5069,1541,2545,88391)\n bi<-n1i - ai\n ci<-c(11,29,11,248,47,372,10,499)\n n2i<-c(139,303,220,12867,5808,1451,629,88391)\n di<-n2i-ci\n binary.data <- new('BinaryData', g1O1=ai, g1O2=bi,\n g2O1=ci, g2O2=di,\n study.names=c('Aaronson', 'Ferguson', 'Rosenthal', 'Hart', 'Frimodt-Moller', 'Stein', 'Vandiviere', 'TPT Madras'),\n covariates=list(latitude=c(44,55,42,52,13,44,19,13), groups=c('1','1','2','2','1','2','2','1')))\n \n res <- compute.for.one.bin.study(binary.data, params) \n binary.data@y <- res$yi\n binary.data@SE <- sqrt(res$vi)\n binary.data\n}\n\ncreate.binary.fnames <- function() {\n binary.fnames <- c(\"binary.fixed.inv.var\", \"binary.fixed.mh\", \n \"binary.fixed.peto\", \"binary.random\")\n} \n\ncreate.binary.meta.fnames <- function() {\n # binary meta functions except for subgroup\n binary.meta.fnames <- c(\"cumul.ma.binary\", \"loo.ma.binary\")\n}\n\ncreate.cont.data <- function(params) {\n N1 <- c(60,65,40,200,50,85)\n mean1 <- c(94,98,98,94,98,96)\n sd1 <- c(22,21,28,19,21,21)\n N2 <- c(60,65,40,200,45,85)\n mean2 <- c(92,92,88,82,88,92)\n sd2 <- c(20,22,26,17,22,22)\n cont.data <- new('ContinuousData', N1=N1, mean1=mean1, sd1=sd1,\n N2=N2, mean2=mean2, sd2=sd2,\n study.names=c(\"Carroll\", \"Grant\", \"Peck\", \"Donat\", \"Stewart\", \"Young\"),\n covariates=list(groups=c('1','1','2','1','2','2')))\n res <- compute.for.one.cont.study(cont.data, params) \n cont.data@y <- res$yi\n cont.data@SE <- sqrt(res$vi)\n cont.data\n}\n\ncreate.cont.fnames <- function() {\n cont.fnames <- c(\"continuous.fixed\", \"continuous.random\")\n}\n\ncreate.diag.data <- function(params) {\n 
diagnostic.data <- new('DiagnosticData', TP=c(81, 16, 8, 4, 15, 12, 1, 18, 18, 112, 14, 7, 57, 7, 6, 18, 2, 1, 37, 4), \n FN=c(5, 0, 0, 0, 2, 3, 0, 0, 3, 2, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0),\n TN=c(186, 48, 14, 30, 15, 34, 16, 32, 13, 414, 33, 44, 26, 14, 24, 96, 18, 4, 91, 18), \n FP=c(273, 90, 98, 11, 35, 51, 11, 125, 55, 601, 57, 127, 35, 38, 59, 35, 21, 9, 72, 32), \n study.names=c('Hmeidan', 'Auslender', 'Botsis','Cacclatore', 'Chan', 'Dorum', 'Goldstein', 'Granberg',\n 'Hanggi', 'Karlsson (a)', 'Karlsson (b)', 'Klug', 'Malinova', 'Nasri (a)', 'Nasri (b)', 'Petrl', 'Taviani', 'Varner', 'Weigel', 'Wolman'))\n res <- get.res.for.one.diag.study(diagnostic.data,params)\n diagnostic.data@y <- res$b\n diagnostic.data@SE <- res$se \n diagnostic.data \n}\n\ncreate.diag.fnames <- function() {\n diag.fnames <- c(\"diagnostic.fixed\", \"diagnostic.random\")\n}\n\nset.params <- function(data.type) {\n params <- list(conf.level=95, digits=3)\n params$fp_show_col2 <- TRUE\n params$fp_show_col3 <- TRUE\n params$fp_show_col4 <- TRUE\n params$fp_col1_str <- \"Studies\"\n params$fp_col2_str <- \"ES (LL, UL)\"\n params$fp_col3_str <- \"Ev / Trt\"\n params$fp_col4_str <- \"Ev / Ctrl\"\n params$fp_show_summary_line <- \"TRUE\"\n params$fp_outpath <- \"./r_tmp/forest_plot.png\"\n params$adjust <- 0.5\n params$to <- \"only0\"\n params$fp_xlabel <- \"Effect size\"\n params$rm.method <- \"DL\"\n if (data.type == \"binary\") {\n params$measure <- \"OR\"\n params$fp_show_col4 <- FALSE\n } else {\n params$measure <- \"Sens\"\n params$fp_show_col4 <- FALSE\n }\n params\n}\n\ntest.binary.functions <- function(binary.data, params, try.errors) {\n binary.fnames <- create.binary.fnames()\n binary.meta.fnames <- create.binary.meta.fnames()\n # test standard functions\n for (fname in binary.fnames) {\n results <- call.function(fname, om.data=binary.data, params)\n if (class(results) == \"try-error\") {\n try.errors[[fname]] <- results\n }\n # test meta functions for each standard function\n \n for (meta.fname in binary.meta.fnames) {\n results <- call.meta.function(meta.fname, fname, om.data=binary.data, params)\n if (class(results) == \"try-error\") {\n try.errors[[meta.fname]] <- results\n } \n }\n results <- subgroup.ma.binary(fname, binary.data, params, cov.name=\"groups\")\n if (class(results) == \"try-error\") {\n try.errors[[meta.fname]] <- results\n }\n }\n try.errors\n}\n\ntest.cont.functions <- function(cont.data, params, try.errors) {\n cont.fnames <- create.cont.fnames()\n for (fname in cont.fnames) {\n results <- call.function(fname, om.data=cont.data, params)\n if (class(results) == \"try-error\") {\n try.errors[[fname]] <- results\n }\n }\n try.errors\n}\n\ntest.diag.functions <- function(diagnostic.data, params, try.errors) {\n diag.fnames <- create.diag.fnames()\n for (fname in diag.fnames) {\n results <- call.function(fname, om.data=diagnostic.data, params)\n if (class(results) == \"try-error\") {\n try.errors[[fname]] <- results\n }\n }\n try.errors\n}\n\ncall.function <- function(fname, om.data, params) {\n results <- try(eval(call(fname, om.data, params)), silent=TRUE)\n}\n\ncall.meta.function <- function(meta.fname, fname, om.data, params, cov.name) {\n results <- try(eval(call(meta.fname, fname, om.data, params)), silent=TRUE)\n}" }, { "alpha_fraction": 0.26769229769706726, "alphanum_fraction": 0.2892307639122009, "avg_line_length": 15.105262756347656, "blob_id": "46e80cd57e2f7ecffbaa95e12a6e01b9a60e3180", "content_id": "7ba4e85112edcfccbca48a7620c70ab5e3e7433e", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 325, "license_type": "no_license", "max_line_length": 34, "num_lines": 19, "path": "/src/R/HSROC/R/truncnorm2.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "truncnorm2 <-\r\nfunction (l, u, m, sd, n) \r\n{\r\n l1 <- pnorm((l - m)/sd)\r\n u1 <- pnorm((u - m)/sd)\r\n x <- runif(n, l1, u1)\r\n if (x == 0) {\r\n y = u\r\n }\r\n else {\r\n if (x == 1) {\r\n y = l\r\n }\r\n else {\r\n y <- qnorm(x) * sd + m\r\n }\r\n }\r\n return(y)\r\n}\r\n" }, { "alpha_fraction": 0.6722565293312073, "alphanum_fraction": 0.6907914280891418, "avg_line_length": 43.72368240356445, "blob_id": "0ab634cca22efbcbcfc78caa6b0bf8d0b12dfa1e", "content_id": "a5911587ddcb90ada5fd076cc439e3b0d5e4eb73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3399, "license_type": "no_license", "max_line_length": 112, "num_lines": 76, "path": "/src/forms/ui_edit_group_name.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'change_group_name_dlg.ui'\n#\n# Created: Wed Apr 17 14:37:19 2013\n# by: PyQt4 UI code generator 4.10\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_group_name_dialog(object):\n def setupUi(self, group_name_dialog):\n group_name_dialog.setObjectName(_fromUtf8(\"group_name_dialog\"))\n group_name_dialog.setEnabled(True)\n group_name_dialog.resize(301, 100)\n group_name_dialog.setMinimumSize(QtCore.QSize(301, 100))\n group_name_dialog.setMaximumSize(QtCore.QSize(500, 100))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n group_name_dialog.setFont(font)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(_fromUtf8(\":/images/meta.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n group_name_dialog.setWindowIcon(icon)\n self.verticalLayout = QtGui.QVBoxLayout(group_name_dialog)\n self.verticalLayout.setObjectName(_fromUtf8(\"verticalLayout\"))\n self.gridLayout = QtGui.QGridLayout()\n self.gridLayout.setObjectName(_fromUtf8(\"gridLayout\"))\n self.field_lbl = QtGui.QLabel(group_name_dialog)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n font.setPointSize(10)\n self.field_lbl.setFont(font)\n self.field_lbl.setObjectName(_fromUtf8(\"field_lbl\"))\n self.gridLayout.addWidget(self.field_lbl, 0, 0, 1, 1)\n self.group_name_le = QtGui.QLineEdit(group_name_dialog)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.group_name_le.setFont(font)\n self.group_name_le.setAlignment(QtCore.Qt.AlignCenter)\n self.group_name_le.setObjectName(_fromUtf8(\"group_name_le\"))\n self.gridLayout.addWidget(self.group_name_le, 0, 1, 1, 1)\n self.verticalLayout.addLayout(self.gridLayout)\n self.buttonBox = QtGui.QDialogButtonBox(group_name_dialog)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.buttonBox.setFont(font)\n self.buttonBox.setOrientation(QtCore.Qt.Horizontal)\n 
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)\n self.buttonBox.setObjectName(_fromUtf8(\"buttonBox\"))\n self.verticalLayout.addWidget(self.buttonBox)\n\n self.retranslateUi(group_name_dialog)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\"accepted()\")), group_name_dialog.accept)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\"rejected()\")), group_name_dialog.reject)\n QtCore.QMetaObject.connectSlotsByName(group_name_dialog)\n\n def retranslateUi(self, group_name_dialog):\n group_name_dialog.setWindowTitle(_translate(\"group_name_dialog\", \"edit group name\", None))\n self.field_lbl.setText(_translate(\"group_name_dialog\", \"group name:\", None))\n\nimport icons_rc\n" }, { "alpha_fraction": 0.5596405267715454, "alphanum_fraction": 0.5616829991340637, "avg_line_length": 33.64622497558594, "blob_id": "91f2e18b18515bf7e7f0ac3e66297ba30195588a", "content_id": "8ac2d57ee00475d737fd877e26d873d6798c56fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7344, "license_type": "no_license", "max_line_length": 86, "num_lines": 212, "path": "/src/change_cov_type_form.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "#import pdb\nimport string\nimport math\n\nfrom PyQt4.Qt import *\nfrom PyQt4 import QtGui\n\nfrom meta_globals import *\nimport forms.ui_change_cov_type\nfrom forms.ui_change_cov_type import Ui_ChangeCovTypeForm\nfrom ma_dataset import Covariate\n\nclass ChangeCovTypeForm(QDialog, Ui_ChangeCovTypeForm):\n def __init__(self, dataset, cov, parent=None):\n super(ChangeCovTypeForm, self).__init__(parent)\n self.setupUi(self)\n self.dataset = dataset\n self.cov_model = CovModel(dataset, cov)\n self.cov_prev_table.setModel(self.cov_model)\n self.cov_prev_table.resizeColumnsToContents()\n\n\nclass CovModel(QAbstractTableModel):\n '''\n This module mediates between the dataset class and \n the TableView used in the ui.\n '''\n def __init__(self, dataset, covariate, filename=QString()):\n super(CovModel, self).__init__()\n self.dataset = dataset\n studies = self.dataset.studies\n\n self.covariate = covariate\n \n # now we add a covariate with the new type\n self.new_data_type = CONTINUOUS if covariate.data_type==FACTOR else FACTOR\n\n # first sort the studies by the cov. 
of interest\n self.dataset.studies.sort(\\\n cmp=self.dataset.cmp_studies(compare_by=self.covariate.name))\n \n self.update_included_studies()\n self.add_cov_with_new_type()\n\n self.refresh_cov_values()\n \n self.STUDY_COL, self.ORIG_VAL, self.NEW_VAL = range(3)\n\n def add_cov_with_new_type(self):\n new_name = self.covariate.name\n if self.new_data_type == CONTINUOUS:\n new_name += \" (continuous)\"\n else:\n new_name += \" (factor)\"\n\n guessed_vals = self.guess_at_values() # try and infer sensible values\n self.new_covariate = \\\n Covariate(new_name, COV_INTS_TO_STRS[self.new_data_type])\n\n self.dataset.add_covariate(self.new_covariate, cov_values=guessed_vals)\n self.reset()\n \n def guess_at_values(self):\n cov_d = self.dataset.get_values_for_cov(self.covariate) # original values\n guessed_vals_d = self.vals_to_new_vals(cov_d)\n\n studies_to_guessed_vals = {}\n for study in self.included_studies:\n if cov_d.has_key(study.name):\n orig_val = cov_d[study.name]\n studies_to_guessed_vals[study.name] = guessed_vals_d[orig_val]\n else:\n studies_to_guessed_vals[study.name] = None\n\n return studies_to_guessed_vals\n\n\n def vals_to_new_vals(self, cov_d):\n unique_values = list(set(cov_d.values()))\n # fix for issue #155\n unique_values.sort()\n mapping = {}\n for i,val in enumerate(unique_values):\n if self.new_data_type == FACTOR:\n mapping[val] = self._to_alphabet_str(i)\n else:\n mapping[val] = i\n \n print mapping\n return mapping\n\n def _is_a_num(self, x):\n try:\n y = float(x)\n return y\n except:\n # nope.\n return False\n\n def _to_alphabet_str(self, x):\n # base conversion.\n alphabet = string.ascii_lowercase\n alpha_str = \"\"\n x_left = x \n while x_left >= 0:\n if x_left > 25:\n alpha_str += \"a\"\n x_left -= 26\n else:\n alpha_str += alphabet[x_left]\n x_left = -1\n\n return alpha_str \n\n def refresh_cov_values(self):\n self.dataset.studies.sort(\\\n cmp=self.dataset.cmp_studies(compare_by=self.covariate.name))\n \n self.update_included_studies()\n cov_d = self.dataset.get_values_for_cov(self.covariate)\n new_cov_d = self.dataset.get_values_for_cov(self.new_covariate)\n\n self.orig_cov_list, self.new_cov_list = [], []\n for study in self.included_studies:\n if cov_d.has_key(study.name):\n self.orig_cov_list.append(cov_d[study.name]) \n self.new_cov_list.append(new_cov_d[study.name])\n else:\n self.orig_cov_list.append(None)\n self.new_cov_list.append(None)\n self.orig_cov_list.append(\"\")\n\n self.reset()\n \n def update_included_studies(self):\n study_list = []\n for study in self.dataset.studies:\n if study.include:\n study_list.append(study)\n self.included_studies = study_list\n\n def data(self, index, role=Qt.DisplayRole):\n \n if not index.isValid() or not (0 <= index.row() < len(self.included_studies)):\n return QVariant()\n\n orig_cov_val = self.orig_cov_list[index.row()]\n if role == Qt.DisplayRole:\n row, column = index.row(), index.column()\n if column == self.STUDY_COL:\n return QVariant(self.included_studies[row].name)\n elif column == self.ORIG_VAL:\n return QVariant(self.orig_cov_list[row])\n elif column == self.NEW_VAL:\n return QVariant(self.new_cov_list[row])\n elif role == Qt.TextAlignmentRole:\n return QVariant(int(Qt.AlignLeft|Qt.AlignVCenter))\n return QVariant()\n \n\n\n def rowCount(self, index=QModelIndex()):\n return len(self.included_studies) # don't show blank study!\n \n def columnCount(self, index=QModelIndex()):\n return 3 # study, orig_val, new_val\n \n def setData(self, index, value, role=Qt.EditRole):\n # don't allow users to mess with 
the original\n # covariate.\n if index.isValid() and 0 <= index.row() < len(self.dataset):\n column = index.column()\n\n if column == self.NEW_VAL:\n # then a (new) covariate value has been edited.\n #pyqtRemoveInputHook()\n #pdb.set_trace()\n study = self.included_studies[index.row()] # associated study\n cov_name = self.new_covariate.name\n new_value = None\n if self.new_covariate.data_type == FACTOR:\n new_value = value.toString()\n else:\n # continuous\n new_value, converted_ok = value.toDouble()\n if not converted_ok: \n print \"whoops! can't convert %s to a number.\" % value\n new_value = None\n study.covariate_dict[cov_name] = new_value\n self.refresh_cov_values()\n return True\n return False\n \n def flags(self, index):\n if not index.isValid():\n return Qt.ItemIsEnabled\n return Qt.ItemFlags(QAbstractTableModel.flags(self, index)|\n Qt.ItemIsEditable)\n\n\n def headerData(self, section, orientation, role=Qt.DisplayRole):\n if role == Qt.TextAlignmentRole:\n return QVariant(int(Qt.AlignLeft|Qt.AlignVCenter))\n if role != Qt.DisplayRole:\n return QVariant()\n if orientation == Qt.Horizontal:\n if section == self.STUDY_COL:\n return QVariant(\"study\")\n elif section == self.ORIG_VAL:\n return QVariant(self.covariate.name)\n elif section == self.NEW_VAL:\n return QVariant(self.new_covariate.name)" }, { "alpha_fraction": 0.6594940423965454, "alphanum_fraction": 0.6850828528404236, "avg_line_length": 43.66233825683594, "blob_id": "e0f9358e9fd3015ec79d074c7ff930bc419a90d3", "content_id": "d4ff4615e59dda63553ce58ead85bff45fdee360", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3439, "license_type": "no_license", "max_line_length": 111, "num_lines": 77, "path": "/src/forms/ui_new_group.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'new_group_dlg.ui'\n#\n# Created: Wed Apr 17 14:37:20 2013\n# by: PyQt4 UI code generator 4.10\n#\n# WARNING! 
All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_new_group_dialog(object):\n def setupUi(self, new_group_dialog):\n new_group_dialog.setObjectName(_fromUtf8(\"new_group_dialog\"))\n new_group_dialog.setEnabled(True)\n new_group_dialog.resize(301, 132)\n new_group_dialog.setMinimumSize(QtCore.QSize(301, 132))\n new_group_dialog.setMaximumSize(QtCore.QSize(301, 132))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n new_group_dialog.setFont(font)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(_fromUtf8(\":/images/meta.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n new_group_dialog.setWindowIcon(icon)\n self.buttonBox = QtGui.QDialogButtonBox(new_group_dialog)\n self.buttonBox.setGeometry(QtCore.QRect(10, 90, 281, 32))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.buttonBox.setFont(font)\n self.buttonBox.setOrientation(QtCore.Qt.Horizontal)\n self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)\n self.buttonBox.setObjectName(_fromUtf8(\"buttonBox\"))\n self.layoutWidget = QtGui.QWidget(new_group_dialog)\n self.layoutWidget.setGeometry(QtCore.QRect(10, 10, 281, 71))\n self.layoutWidget.setObjectName(_fromUtf8(\"layoutWidget\"))\n self.gridLayout = QtGui.QGridLayout(self.layoutWidget)\n self.gridLayout.setMargin(0)\n self.gridLayout.setObjectName(_fromUtf8(\"gridLayout\"))\n self.label_2 = QtGui.QLabel(self.layoutWidget)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n font.setPointSize(10)\n self.label_2.setFont(font)\n self.label_2.setObjectName(_fromUtf8(\"label_2\"))\n self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)\n self.group_name_le = QtGui.QLineEdit(self.layoutWidget)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.group_name_le.setFont(font)\n self.group_name_le.setAlignment(QtCore.Qt.AlignCenter)\n self.group_name_le.setObjectName(_fromUtf8(\"group_name_le\"))\n self.gridLayout.addWidget(self.group_name_le, 0, 1, 1, 1)\n\n self.retranslateUi(new_group_dialog)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\"accepted()\")), new_group_dialog.accept)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\"rejected()\")), new_group_dialog.reject)\n QtCore.QMetaObject.connectSlotsByName(new_group_dialog)\n\n def retranslateUi(self, new_group_dialog):\n new_group_dialog.setWindowTitle(_translate(\"new_group_dialog\", \"add new tx group (arm)\", None))\n self.label_2.setText(_translate(\"new_group_dialog\", \"tx group name:\", None))\n\nimport icons_rc\n" }, { "alpha_fraction": 0.6784340143203735, "alphanum_fraction": 0.6980463266372681, "avg_line_length": 74.61849975585938, "blob_id": "26a2059c23c76cb6044790c617b0c3e81481fea1", "content_id": "0890738fe77d482286625f903a4144ebc98ebcfb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 13257, "license_type": "no_license", "max_line_length": 562, "num_lines": 173, "path": "/src/R/HSROC/man/HSROC.Rd", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": 
"\\name{HSROC}\r\n\\alias{HSROC}\r\n\r\n\\title{A function for joint meta-analysis of sensitivity and specificity of a diagnostic test.}\r\n\r\n\\description{ This function is used to estimate the parameters of a hierarchical summary receiver operating characteristic (HSROC) model allowing for the reference standard to be possibly imperfect, and assuming it is conditionally independent from the test under evaluation. The estimation is carried out using a Gibbs sampler. }\r\n\r\n\\usage{\r\nHSROC(data, iter.num, init = NULL, sub_rs=NULL, \r\n first.run = TRUE, path=getwd(), refresh = 100, \r\n prior.SEref=NULL, prior.SPref=NULL, prior_PI=c(0,1), \r\n prior_LAMBDA = c(-3,3), prior_THETA = c(-1.5,1.5), \r\n prior_sd_alpha = list(0,2,\"sd\"), prior_sd_theta = list(0,2,\"sd\"), \r\n prior_beta = c(-0.75,0.75)) \r\n\r\n}\r\n\r\n\r\n\\arguments{ \r\n \\item{data}{a matrix with the number of rows equal to the number of studies and 4 columns. Each row consists of the entries of the 2x2 table of the index test (i.e. test under evaluation) vs. the reference test reported in each study. The ordering of the columns is ++, +-, -+, --, where the first entry refers to the result of the test under evaluation and the second entry refers to the result of the reference test. }\r\n \\item{iter.num}{the number of iterations of the Gibbs sampler.}\r\n \\item{init}{a list of initial values. See details for further explanation.}\t\r\n \\item{sub_rs}{a list that describes the partition of the reference standards among the studies, if any. The first element of the list is the number of different reference standards used. Element 2 to \\code{sub_rs[1]} are vectors of integers specifying the study numbers that used each reference test. See details.}\r\n \\item{prior.SEref}{a vector of values specifying the lower and upper limits of the sensitivity of the reference test(s) based on prior information. If there are multiple reference standards, the lower and upper limits are each specified as a vector of length \\code{sub_rs[1]}. The default value is NULL, implying a single reference standard with perfect sensitivity. }\r\n \\item{prior.SPref}{a vector of values specifying the lower and upper limits of the specificity of the reference test(s) based on prior information. If there are multiple reference standards, the lower and upper limits are each specified as a vector of length \\code{sub_rs[1]}. The default value is NULL, implying a single reference standard with perfect specificity. }\r\n \\item{first.run}{logical. If TRUE (default), the \\code{gibbs.sampler} function is run for the first time. If FALSE, the function assumes k iterations where already run and it continues from where it left off, that is from iteration k+1.}\r\n \\item{path}{a character string pointing to the directory where the sample drawn at each iteration of the Gibbs sampler are to be stored. }\r\n \\item{refresh}{A positive integer defining the frequency at which the Gibbs sampler's progress will be displayed on the R GUI. The default is 100. }\r\n\r\n \\item{prior_PI}{a vector with 2 components specifying the minimum and maximum values for the prevalence in each study based on prior information. If unspecified, \\eqn{Beta(1,1)}{Beta(1,1)} priors are used. }\r\n \\item{prior_LAMBDA}{a vector with 2 components specifying the minimum and maximum values for the difference in mean values (diagnostic accuracy) among disease positive and negative groups based on prior information. 
The default value is \\code{c(-3,3)} implying a \\eqn{U(-3,3)}{U(-3,3)}.}\r\n \\item{prior_THETA}{a vector with 2 components specifying the minimum and maximum values for the overall mean cut-off value for defining a positive test based on prior information. The default value is \\code{c(-1.5,1.5)} implying a \\eqn{U(-1.5,1.5)}{U(-1.5,1.5)}.}\r\n \\item{prior_sd_alpha}{a list with 3 components. The first 2 components are specifying the minimum and maximum values for the between study standard deviation in the difference in mean values of the disease positive and negative groups in the \\eqn{i^{th}}{ith} study, \\eqn{\\alpha_i}{alpha_i}, based on prior information. The third component determine whether we want the prior to be on the standard deviation (sd) or the variance (v). The default value is \\code{list(0,2,\"sd\")} implying a \\eqn{U(0,2)}{U(0,2)} prior. }\r\n \\item{prior_sd_theta}{a list with 3 components. The first 2 components are specifying the minimum and maximum values for the between study standard deviation in the cut-off, \\eqn{\\theta_i}{theta_i}, in the \\eqn{i^{th}}{ith} study based on prior information. The third component determine whether we want the prior to be on the standard deviation (s) or the variance (v). The default value is \\code{list(0,2,\"sd\")} implying a \\eqn{U(0,2)}{U(0,2)} prior. }\r\n \\item{prior_beta}{a vector with 2 components specifying the minimum and maximum values for the logarithm of the ratio of the standard deviation of test results among patients with and without the disease, based on prior belief. This parameter is assumed to be constant across studies. The default value is \\code{c(-0.75,0.75)} implying a \\eqn{U(-0..75,0.75)}{U(-0.75,0.75)}. If the argument is (\\code{NULL}) the function assumes a range of (-log( (LAMBDA.up/3) + 1 ) , log( (LAMBDA.up/3) + 1 ) ), where LAMBDA.up is the upper limit of \\code{prior.LAMBDA}. }\r\n\r\n\r\n}\r\n\r\n\\details{\r\n\r\nOur HSROC model uses a probit link function and not the logit link function used in the HSROC model developped by Rutter and Gatsonis (2001). \r\n\r\nThe probability of a positive result on the index test for the \\eqn{j^{th}}{jth} individual in the \\eqn{i^{th}}{ith} study is given by :\r\n\r\n\\deqn{1 - \\Phi( ( \\theta_i - \\alpha_i D_{ij} )exp(-\\beta D_{ij}) ),}{1 - PHI( (theta_i - alpha_i D_{ij})*exp(-beta D_{ij})),}\r\n\r\nwhile the probability of a negative result on the index test for the \\eqn{j^{th}}{jth} individual in the \\eqn{i^{th}}{ith} study is given by :\r\n\r\n\\deqn{\\Phi( ( \\theta_i - \\alpha_i D_{ij} )exp(-\\beta D_{ij}) ),}{PHI( (theta_i - alpha_i D_{ij})*exp(-beta D_{ij})),}\r\n\r\nwhere \\eqn{D_{ij}}{D_ij} = 0.5 if the \\eqn{j^{th}}{jth} individual in the \\eqn{i^{th}}{ith} study is disease positive and -0.5 if the \\eqn{j^{th}}{jth} individual in the \\eqn{i^{th}}{ith} study is not disease negative.\r\n\r\n\r\nWhen ranges are provided for \\code{prior.SEref}, \\code{prior.SPref} and \\code{prior_PI} they are transformed into Beta prior distributions using the method described in \\code{beta.parameter}.\r\n\r\nIf the argument \\code{init} = \\code{NULL}, the function will randomly generate initial values based on the prior information. Otherwise, the user can provide his own initial values for the within-study and between-study parameters and for the reference standards through the \\code{init} argument, with the within-study, between study and reference standard initial values as the first, second and third element of \\code{init}, respectively. 
Furthermore, \r\nthe within-study parameters must be a matrix-like object with each column being initial values for \\eqn{\\alpha_i}{alpha_i}, \\eqn{\\theta_i}{theta_i}, sensitivity of the test under evaluation \\eqn{S_{1i}}{S1_i}, specificity of the test under evaluation \\eqn{C_{1i}}{C1_i} and prevalence \\eqn{\\pi_i}{pi_i}. \r\nThe between-study parameters must be a vector of the following initial values : \\eqn{\\Theta}{THETA}, \\eqn{\\sigma_{\\theta}}{sigma_theta}, \\eqn{\\Lambda}{LAMBDA}, \\eqn{\\sigma_{\\alpha}}{sigma_alpha} and \\eqn{\\beta}{beta}. The initial values for the reference standard must be a 2 X \\code{sub_rs[[1]]} matrix-like object. \r\nThe first row must be the initial values of the sensitivity of the reference standard, while the second row must correspond to initial values of the specificity of the reference standard. The ordering described above in the within-study, between-study and reference standard parameters must be preserved. \r\n\r\n\r\nThe first element of the list-object \\code{sub_rs} corresponds to the number of different reference standards. The default value is 1. The number of additional elements will depend on the value of the first element. There must be as many additional elements in \\code{sub_rs} as there are different reference standards. Assuming the studies are labelled 1, ..., N, \r\neach of these additional elements must be a vector (possibly of length one) taking as their values the labelling of the corresponding studies sharing the same reference standard. For example, if we have 2 reference tests, the first one aplied over study 1-10 and the second one applied over study 11-15 then the \\code{sub_rs} list-argument should be of length 3 with the following elements : 3, 1:10, 11:15\r\n\r\nThe \\code{path} argument points to the working directory where files written at each iteration of the Gibbs sampler will be saved. If no path is provided, the current working directory will be used\r\n}\r\n\r\n\\value{\r\n\r\nText files with samples from the joint posterior distribution of the between-study parameters, within-study parameters and performance parameters of the reference standard(s) are created in the \\code{path} directory. These results can be summarized using the \\code{HSROCSummary} function.\r\n\r\nThe following files are also created and saved in the \\code{path} directory :\r\n\r\n\\dQuote{Prior.information.txt}, lists the prior distributions.\r\n\r\n\\dQuote{Initial values.txt}, lists the initial values used. If the argument \\code{init} = \\code{NULL}, the initial value file is called \\dQuote{Random Initial values.txt}.\r\n\r\nA series of files listing the values of various parameters sampled in the last iteration of the Gibbs sampler as follows :\r\n\r\n\\dQuote{REstarting values.txt}, contains values of the within-study parameters (\\eqn{\\alpha_i}{alpha_i}, \\eqn{\\theta_i}{theta_i}, sensitivity of test under evaluation ( \\eqn{S_{1i}}{S1_i} ), specificity of test under evaluation ( \\eqn{C_{1i}}{C1_i} ) and prevalence ( \\eqn{\\pi_i}{pi_i} ) ).\r\n\r\n\\dQuote{REstarting values 2.txt}, contains values of the between-study parameters (\\eqn{\\Lambda}{LAMBDA}, standard deviation of \\eqn{\\alpha_i}{alpha_i}, ( \\eqn{\\sigma_{\\alpha}}{sigma_alpha} ), \\eqn{\\Theta}{THETA}, standard deviation of \\eqn{\\theta_i}{theta_i} ( \\eqn{\\sigma_{\\theta}}{sigma_theta }) and \\eqn{\\beta}{beta} ). 
\r\n\r\n\\dQuote{REstarting REFSTD.txt}, contains values of the sensitivity and specificity of the reference test.\r\n\r\n\\dQuote{REstart values index.txt}, lists the variable names in the 3 files described above.\r\n\r\n\r\n}\r\n\r\n\\references{\r\nN. Dendukuri, I. Schiller, L. Joseph and M. Pai \\emph{Bayesian meta-analysis of the accuracy of a test for tuberculosis pleuritis in the absence of a gold-standard reference}. (Under review).\r\n\r\nC. M. Rutter and C. A. Gatsonis. \\emph{A hierarchical regression approach to meta-analysis of diagnostic accuracy evaluations}. Statistics in Medicine, 20(19):2865-2884, 2001.\r\n}\r\n \r\n\\examples{\r\n\r\n#===============================================================\r\n#TO SET UP THE REFERENCE STANDARD\r\n#===============================================================\r\n\r\n\r\n#There were three different reference standards for the In.house dataset. \r\n#The first reference standard was used in study 1 and 2, \r\n#the second was used in studies 3 and 4 and the third in study 5 to 12. \r\nREFSTD = list(3, 1:2, 3:4, 5:11) \r\n\r\n#===============================================================\r\n#TO SET UP DATA AND INITIAL VALUES\r\n#===============================================================\r\n\r\ndata(In.house)\r\nM = length(In.house[,1])\r\n\r\n\r\n#Initial values for the within-study parameters\r\ninit.alpha = rep(2.5, M) ;\tinit.theta = rep(1, M) ;\r\ninit.s1 = rep(0.5, M) ;\tinit.c1 = rep(0.5, M) ;\r\ninit.pi = rep(0.5, M)\r\n\r\n#Initial values for the between-study parameters\r\ninit.THETA = 1 ;\tinit.sd.theta = 0.5 ;\r\ninit.LAMBDA = 2.5 ;\tinit.sd.alpha = 0.5 ;\r\ninit.beta = 0 ;\r\n\r\n#Initial values for the reference standard sensitivities and specificities\r\ninit.s2 = rep(0.5, REFSTD[[1]]) ;\tinit.c2 = rep(0.5, REFSTD[[1]])\r\n\r\n#The ordering of the initial values is important!\r\ninit1 = cbind(init.alpha, init.theta, init.s1, init.c1, init.pi)\r\ninit2 = c(init.THETA, init.sd.theta, init.LAMBDA, init.sd.alpha, init.beta)\r\ninit3 = rbind(init.s2, init.c2)\r\n\r\ninit = list(init1, init2, init3)\r\n#===============================================================\r\n#TO PROVIDE PRIOR INFORMATION\r\n#===============================================================\r\n\r\nS2.a = c(0.2, 0.2, 0.7) ; \tS2.b = c(0.6, 0.7, 0.9)\r\nC2.a = rep(0.9, 3) ;\tC2.b = rep(1, 3)\r\n\r\n#===============================================================\r\n#TO RUN GIBBS SAMPLER\r\n#===============================================================\r\n\r\n\r\n\r\n\\dontrun{\r\n\r\nset.seed(10)\r\nestimates = HSROC(data=In.house, init=init, iter.num=5000, \r\n prior.SEref=c(S2.a,S2.b), prior.SPref=c (C2.a,C2.b), sub_rs=REFSTD) \r\n\r\n#Putting prior information on sigma_alpha^2 (sigma_alphs squared) \r\n#instead of sigma_alpha\r\nset.seed(10)\r\nestimates = HSROC(data=In.house, init=init, iter.num=5000, \r\n prior.SEref=c(S2.a,S2.b), prior.SPref=c (C2.a,C2.b), \r\n\t\tsub_rs=REFSTD, prior_sd_alpha = list(0,2,\"v\"))\r\n\r\n\r\n#Letting the function select randomly its own initial values\r\nset.seed(10)\r\nestimates = HSROC(data=In.house, iter.num=5000, \r\n prior.SEref=c(S2.a,S2.b), prior.SPref=c (C2.a,C2.b), \r\n\t\tsub_rs=REFSTD)\r\n\r\n}\r\n}\r\n\r\n\\keyword{models}\r\n\r\n" }, { "alpha_fraction": 0.6271186470985413, "alphanum_fraction": 0.6343825459480286, "avg_line_length": 27.517240524291992, "blob_id": "3b77f7da457f51e876aca0c8547e24065542f8df", "content_id": "ce085f1903776370cb1a11a981849b6788560db9", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 826, "license_type": "no_license", "max_line_length": 78, "num_lines": 29, "path": "/src/win_prelaunch.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "'''\nThis file should only be used when launching a windows build, not during\ndevelopement\n\n@author: George Dietz\n CEBM@Brown\n'''\n\nimport os\n\n# # Set R environment variables\n# oldpath = os.environ[\"PATH\"]\n# cwd = os.getcwd()\n# rpath = os.path.join(cwd, \"R_dist\") # second 'Resources' is R directory\n# # just adding the 64-bit path version for now\n# os.environ[\"PATH\"] = os.path.join(rpath, \"bin\",\"x64\") + os.pathsep + oldpath\n# print(\"new path is: %s\" % os.environ[\"PATH\"])\n# \n# #os.environ[\"R\"] = os.path.join(cwd, rpath, \"bin\")\n# os.environ[\"R_HOME\"] = os.path.join(cwd, rpath)\n# #os.environ[\"R_HOME\"] = os.path.join(rpath, \"bin\",\"x64\")\n# print(\"R_HOME: %s\" % os.environ[\"R_HOME\"])\n# \n# os.environ[\"R_USER\"] = \"oma\" \n\n# we are ready to start the main program loop\nimport launch\nif __name__ == \"__main__\":\n launch.start()" }, { "alpha_fraction": 0.5707359313964844, "alphanum_fraction": 0.5715647339820862, "avg_line_length": 40.608619689941406, "blob_id": "e4e7eab8c1bb26d455d5dac5b27afa99c4500918", "content_id": "453259aeae6952845ebbccacc4b516a2ba54a0f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24132, "license_type": "no_license", "max_line_length": 139, "num_lines": 580, "path": "/src/main_wizard.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "import forms.ui_choose_metric_page\nimport forms.ui_csv_import_page\nimport forms.ui_data_type_page\nimport forms.ui_outcome_name_page\nimport forms.ui_welcome_page\n\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nimport meta_globals\nfrom ma_data_table_model import DatasetModel\n\n\nclass WelcomePage(QWizardPage, forms.ui_welcome_page.Ui_WizardPage):\n def __init__(self, parent=None, recent_datasets=[]):\n super(WelcomePage, self).__init__(parent)\n self.setupUi(self)\n \n self.recent_datasets = recent_datasets\n self.selected_dataset = None\n self.setPixmap(QWizard.BackgroundPixmap, QPixmap(':/wizard_images/wizard_images/forest.jpg'))\n self._setup_connections()\n \n def initializePage(self):\n #self.wizard().adjustSize()\n pass\n\n def isComplete(self): # disable next/back buttons\n return False\n \n def nextId(self):\n #print(\"wizard path is: %s\" % str(self.wizard().get_wizard_path()))\n if self.wizard().get_wizard_path() == \"open\":\n return -1\n else:\n return Page_DataType \n \n def _setup_connections(self):\n QObject.connect(self.create_new_btn, SIGNAL(\"clicked()\"), self.new_dataset)\n QObject.connect(self.open_btn, SIGNAL(\"clicked()\"), self.open_dataset)\n self._setup_open_recent_btn()\n QObject.connect(self.import_csv_btn, SIGNAL(\"clicked()\"), self.import_csv)\n \n def _setup_open_recent_btn(self):\n if len(self.recent_datasets) > 0:\n ### \n # then add a drop-down to the 'open recent' \n # button with the recent datasets.\n qm = QMenu()\n for dataset in self.recent_datasets[::-1]: # most recent dataset is last in list\n action_item = QAction(QString(dataset), qm)\n qm.addAction(action_item)\n # I wanted to handle this with lambdas, but the method would\n # inexplicably always be invoked with the last dataset as the\n # argument. 
Instead, I've opted to use the .sender method to\n # retrieve the action_item, i.e., dataset, selected (see\n # the dataset_selected routine).\n QObject.connect(action_item, SIGNAL(\"triggered()\"), self.dataset_selected) \n self.open_recent_btn.setMenu(qm)\n else:\n self.open_recent_btn.setEnabled(False)\n \n def dataset_selected(self):\n self.wizard().set_wizard_path(\"open\")\n \n # we use the sender method to see which menu item was\n # triggered\n dataset_path = QObject.sender(self).text() # is a qstring\n dataset_path = unicode(dataset_path.toUtf8(),'utf8')\n self.selected_dataset = dataset_path\n self.wizard().set_selected_dataset(self.selected_dataset)\n self.wizard().accept()\n \n def open_dataset(self):\n self.wizard().set_wizard_path(\"open\")\n\n self.selected_dataset = QFileDialog.getOpenFileName(\n parent=self,\n caption=QString(\"OpenMeta[analyst] - Open File\"),\n directory=\".\",\n filter=\"open meta files (*.oma)\")\n self.selected_dataset = unicode(self.selected_dataset.toUtf8(),'utf8')\n\n if self.selected_dataset != '':\n self.wizard().set_selected_dataset(self.selected_dataset)\n self.wizard().accept()\n\n def import_csv(self):\n self.wizard().set_wizard_path(\"csv_import\")\n self.wizard().next()\n \n def new_dataset(self):\n self.wizard().set_wizard_path(\"new_dataset\")\n self.wizard().next()\n \n################################################################################\n \nclass DataTypePage(QWizardPage, forms.ui_data_type_page.Ui_DataTypePage):\n def __init__(self, parent=None):\n super(DataTypePage, self).__init__(parent)\n self.setupUi(self)\n \n self.selected_datatype = None\n self.summary = dict(arms=None, data_type=None, sub_type=None, effect=None, metric_choices=[], name=None) #ProjectInfo()\n \n QObject.connect(self.buttonGroup, SIGNAL(\"buttonClicked(QAbstractButton*)\"), self._button_selected)\n \n self.setPixmap(QWizard.BackgroundPixmap, QPixmap(':/wizard_images/wizard_images/laplace.jpg'))\n \n def initializePage(self):\n #self.wizard().adjustSize()\n self.setFocus()\n \n def _button_selected(self, button):\n #print(\"button clicked %s\" % str(button))\n \n if button == self.onearm_proportion_Button:\n self.summary['arms'] = 'one'\n self.summary['data_type'] = 'binary'\n self.summary['sub_type'] = 'proportion'\n self.summary['effect'] = \"PR\" # default effect\n self.summary['metric_choices'] = meta_globals.BINARY_ONE_ARM_METRICS\n elif button == self.onearm_mean_Button:\n self.summary['arms'] = 'one'\n self.summary['data_type'] = 'continuous'\n self.summary['sub_type'] = 'mean'\n self.summary['effect'] = meta_globals.DEFAULT_CONTINUOUS_ONE_ARM\n self.summary['metric_choices'] = meta_globals.CONTINUOUS_ONE_ARM_METRICS\n elif button == self.onearm_single_reg_coef_Button:\n self.summary['arms'] = 'one'\n self.summary['data_type'] = 'continuous'\n self.summary['sub_type'] = 'reg_coef'\n self.summary['effect'] = meta_globals.DEFAULT_CONTINUOUS_ONE_ARM\n self.summary['metric_choices'] = meta_globals.CONTINUOUS_ONE_ARM_METRICS\n elif button == self.onearm_generic_effect_size_Button:\n self.summary['arms'] = 'one'\n self.summary['data_type'] = 'continuous'\n self.summary['sub_type'] = 'generic_effect' # TODO: Should disable_two-arm metrics for generic effect\n self.summary['effect'] = meta_globals.DEFAULT_CONTINUOUS_ONE_ARM\n self.summary['metric_choices'] = meta_globals.CONTINUOUS_ONE_ARM_METRICS\n #twoarm\n elif button == self.twoarm_proportions_Button:\n self.summary['arms'] = 'two'\n self.summary['data_type'] = 'binary'\n self.summary['sub_type'] = 
'proportions'\n self.summary['effect'] = \"OR\"\n self.summary['metric_choices'] = meta_globals.BINARY_TWO_ARM_METRICS\n elif button == self.twoarm_means_Button:\n self.summary['arms'] = 'two' \n self.summary['data_type'] = 'continuous'\n self.summary['sub_type'] = 'means'\n self.summary['effect'] = \"MD\"\n self.summary['metric_choices'] = meta_globals.CONTINUOUS_TWO_ARM_METRICS\n elif button == self.twoarm_smds_Button:\n self.summary['arms'] = 'two' \n self.summary['data_type'] = 'continuous'\n self.summary['sub_type'] = 'smd'\n self.summary['effect'] = \"SMD\"\n self.summary['metric_choices'] = meta_globals.CONTINUOUS_TWO_ARM_METRICS\n #diagnostic\n elif button == self.diagnostic_Button:\n self.summary['data_type'] = 'diagnostic'\n \n # Put information from pressing the button into the wizard storage area\n self.wizard().set_dataset_info(self.summary)\n self.emit(SIGNAL(\"completeChanged()\"))\n \n def isComplete(self):\n #print(self.buttonGroup.checkedButton())\n \n if self.buttonGroup.checkedButton():\n return True\n else:\n return False\n \n def nextId(self):\n if self.wizard().get_dataset_info() and self.wizard().get_dataset_info()['data_type'] == 'diagnostic':\n return Page_OutcomeName\n else: #normal case\n return Page_ChooseMetric\n \n \n \n \n###############################################################################\nclass ChooseMetricPage(QWizardPage, forms.ui_choose_metric_page.Ui_WizardPage):\n def __init__(self, parent=None):\n super(ChooseMetricPage, self).__init__(parent)\n self.setupUi(self)\n \n QObject.connect(self.metric_cbo_box, SIGNAL(\"currentIndexChanged(int)\"), self._metric_choice_changed)\n \n def initializePage(self):\n data_type = self.wizard().get_dataset_info()['data_type']\n metric_choices = self.wizard().get_dataset_info()['metric_choices']\n default_effect = self.wizard().get_dataset_info()['effect']\n \n # Add metric choices to combo box\n self.metric_cbo_box.blockSignals(True)\n self.metric_cbo_box.clear()\n self.metric_cbo_box.blockSignals(False)\n if data_type != 'diagnostic':\n self.metric_cbo_box.blockSignals(True)\n for metric in metric_choices:\n metric_pretty_name = meta_globals.ALL_METRIC_NAMES[metric]\n self.metric_cbo_box.addItem(QString(metric + \": \" + metric_pretty_name), userData=QVariant(QString(metric)))\n index_of_default = self.metric_cbo_box.findData(QVariant(QString(default_effect)))\n self.metric_cbo_box.setCurrentIndex(index_of_default)\n \n default_item_text = self.metric_cbo_box.itemText(index_of_default)\n default_item_text += QString(\" (DEFAULT)\")\n self.metric_cbo_box.setItemText(index_of_default, default_item_text)\n # Resize the dialog\n self.metric_cbo_box.blockSignals(False)\n \n self.setPixmap(QWizard.BackgroundPixmap, QPixmap(':/wizard_images/wizard_images/airy.jpg'))\n #self.wizard().adjustSize()\n \n def _metric_choice_changed(self, newindex):\n self.wizard().set_effect(str(self.metric_cbo_box.itemData(newindex).toString()))\n \n def nextId(self):\n return Page_OutcomeName\n############################################################################### \n\nimport csv\n\nclass CsvImportPage(QWizardPage, forms.ui_csv_import_page.Ui_WizardPage):\n def __init__(self, parent=None):\n super(CsvImportPage, self).__init__(parent)\n self.setupUi(self)\n \n self.connect(self.select_file_btn, SIGNAL(\"clicked()\"), self._select_file)\n self.connect(self.from_excel_chkbx, SIGNAL(\"stateChanged(int)\"), self._rebuild_display)\n self.connect(self.has_headers_chkbx, SIGNAL(\"stateChanged(int)\"), self._rebuild_display)\n \n 
self.setPixmap(QWizard.BackgroundPixmap, QPixmap(':/wizard_images/wizard_images/cochran.jpg'))\n \n def initializePage(self):\n ######################################################\n self.file_path = None\n self._reset_data()\n ######################################################\n \n self.required_header_labels = self._get_required_header_labels()\n self.required_fmt_table.setRowCount(2)\n self.required_fmt_table.setColumnCount(len(self.required_header_labels))\n\n self.required_fmt_table.setHorizontalHeaderLabels(self.required_header_labels)\n self.required_fmt_table.resizeColumnsToContents()\n self.required_fmt_table.resizeRowsToContents()\n \n # Set up preview format table\n for row in range(self.required_fmt_table.rowCount()):\n for col in range(self.required_fmt_table.columnCount()):\n self.required_fmt_table.setItem(row,col,QTableWidgetItem(\"\"))\n self.required_fmt_table.item(row, col).setFlags(Qt.NoItemFlags)\n \n def isComplete(self):\n # We must have a file selected\n if not self.file_path:\n return False \n \n if self.imported_data_ok:\n self.wizard().set_csv_data(self.csv_data()) # stick csv data into wizard\n return True\n else:\n return False\n\n def _reset_data(self):\n self.preview_table.clear()\n self.headers = []\n self.covariate_names = []\n self.covariate_types = []\n self.imported_data = []\n self.imported_data_ok = True\n \n def _select_file(self):\n self.file_path = QFileDialog.getOpenFileName(\n parent=self,\n caption=QString(\"OpenMeta[analyst] - Import CSV\"),\n directory=\".\",\n filter=\"csv files (*.csv)\")\n self.file_path = unicode(self.file_path.toUtf8(),'utf8')\n\n if self.file_path:\n self.file_path_lbl.setText(QString(self.file_path))\n \n if self.file_path:\n self._rebuild_display()\n \n def _rebuild_display(self):\n self._reset_data()\n try:\n self.extract_data()\n except Exception as e:\n print(e)\n QMessageBox.warning(self, \"Whoops\", \"Something went wrong while trying to import csv, try again\")\n self.imported_data_ok = False\n return False\n \n num_rows = len(self.imported_data)\n num_cols = len(self.imported_data[0])\n self._handle_covariates_in_extracted_data(\n num_rows, num_cols, headers = self.headers, \n expected_headers = self.required_header_labels)\n \n if len(self.imported_data) == 0:\n QMessageBox.warning(self, \"Whoops\", \"No data in CSV!, try again\")\n self.imported_data_ok = False\n return False\n \n # set up table\n self.preview_table.setRowCount(num_rows)\n self.preview_table.setColumnCount(num_cols)\n if self.headers != []:\n self.preview_table.setHorizontalHeaderLabels(self.headers)\n else:\n preview_header_labels = self.required_header_labels[:]\n preview_header_labels.extend(self.covariate_names)\n self.preview_table.setHorizontalHeaderLabels(preview_header_labels)\n \n # copy extracted data to table\n for row in range(num_rows):\n for col in range(num_cols):\n item = QTableWidgetItem(QString(self.imported_data[row][col]))\n item.setFlags(Qt.NoItemFlags)\n self.preview_table.setItem(row,col,item)\n self.preview_table.resizeColumnsToContents()\n self.preview_table.resizeRowsToContents()\n \n # Validate table entries\n self._validate_imported_data()\n self.emit(SIGNAL(\"completeChanged()\"))\n \n def _validate_imported_data(self):\n # Make sure there are at least as many columns as required columns\n # (additional columns are covariates hopefully) \n# if self.preview_table.columnCount() < self.required_fmt_table.columnCount():\n# QMessageBox.warning(self, \"Whoops\", \"There are two few columns in the imported csv, try 
again with a properly formatted CSV.\")\n# self._reset_data\n# return False\n \n # Are the years integers?\n for row in range(len(self.imported_data)):\n try:\n # -1 since the imported data doesn't have an 'include' column\n int(self.imported_data[row][DatasetModel.YEAR-1])\n except ValueError:\n QMessageBox.warning(self, \"Whoops\", \"The year at row \" + str(row+1) + \" is not an integer number.\")\n self.imported_data_ok = False\n return False\n # More validation??\n \n def _get_required_header_labels(self):\n '''\n Provides column header labels based on chosen datatype and subtype\n ** Must be updated if header_data() is ma_data_table_model is changed\n ''' \n \n data_type = self.wizard().get_dataset_info()['data_type']\n data_subtype = self.wizard().get_dataset_info()['sub_type']\n effect = self.wizard().get_dataset_info()['effect']\n raw_cols, outcome_cols = DatasetModel.get_column_indices(data_type, data_subtype)\n \n header_labels = []\n \n model_cols = [DatasetModel.NAME, DatasetModel.YEAR]\n model_cols.extend(raw_cols)\n model_cols.extend(outcome_cols)\n \n for col in model_cols:\n col_name = DatasetModel.helper_basic_horizontal_headerData(\n section=col, data_type=meta_globals.STR_TO_TYPE_DICT[data_type], sub_type=data_subtype,\n raw_columns=raw_cols, outcome_columns=outcome_cols,\n current_effect=effect,\n groups=meta_globals.DEFAULT_GROUP_NAMES)\n col_name = str(col_name.toString())\n header_labels.append(col_name)\n return header_labels\n \n def csv_data(self):\n ''' Imported data is a list of rows. A row is a list of\n cell contents (as strings) '''\n \n if self.imported_data_ok:\n return {'headers':self.headers,\n 'data':self.imported_data,\n 'expected_headers':self.required_header_labels,\n 'covariate_names': self.covariate_names,\n 'covariate_types': self.covariate_types,}\n else:\n print(\"Something went wrong while trying to import from csv\")\n return None\n \n def _handle_covariates_in_extracted_data(self, num_rows, num_cols, headers=[], expected_headers=[]):\n if num_cols > len(expected_headers): # Do we have covariates?\n num_covariates = num_cols - len(expected_headers)\n print(\"There are %d covariates\" % num_covariates)\n else:\n return None # no covariates to deal with\n \n def covariate_name(index, given_name):\n if str(given_name).strip() == \"\":\n return \"Covariate \"+str(index+1)\n else:\n return given_name\n \n if self._hasHeaders():\n covariate_names = headers[len(expected_headers):]\n else:\n covariate_names = [\"\"]*num_covariates\n self.covariate_names = [covariate_name(i, name) for i,name in enumerate(covariate_names)]\n \n def covariate_type(data):\n for x in data:\n try:\n float(x)\n except ValueError:\n return \"factor\" # these types are important to get right (look in covariate constructor)\n return \"continuous\" #\n\n index_offset = len(expected_headers)\n for cov_index in range(len(covariate_names)):\n cov_data = [self.imported_data[row][index_offset+cov_index] for row in range(num_rows)]\n self.covariate_types.append(covariate_type(cov_data))\n \n def extract_data(self):\n with open(self._get_filepath(), 'rU') as csvfile:\n args_csv_reader = {'delimiter': self._get_delimter(),\n 'quotechar': self._get_quotechar(),\n }\n if self._isFromExcel():\n args_csv_reader = {}\n args_csv_reader['dialect']='excel'\n \n # set up reader object\n reader = csv.reader(csvfile, **args_csv_reader)\n \n self.headers = []\n self.imported_data = []\n if self._hasHeaders():\n self.headers = reader.next()\n for row in reader:\n self.imported_data.append(row)\n 
self.print_extracted_data() # just for debugging\n \n def print_extracted_data(self):\n print(\"Data extracted from csv:\")\n print(self.headers)\n for row in self.imported_data:\n print(str(row))\n\n def _get_filepath(self):\n return self.file_path\n def _isFromExcel(self):\n return self.from_excel_chkbx.isChecked()\n def _hasHeaders(self):\n return self.has_headers_chkbx.isChecked()\n def _get_delimter(self):\n return str(self.delimter_le.text())\n def _get_quotechar(self):\n return str(self.quotechar_le.text())\n################################################################################\nclass OutcomeNamePage(QWizardPage, forms.ui_outcome_name_page.Ui_WizardPage):\n def __init__(self, parent=None):\n super(OutcomeNamePage, self).__init__(parent)\n self.setupUi(self)\n \n self.setPixmap(QWizard.BackgroundPixmap, QPixmap(':/wizard_images/wizard_images/fisher.jpg'))\n \n self.registerField(\"outcomeName*\", self.outcome_name_LineEdit)\n \n def initializePage(self):\n #self.wizard().adjustSize()\n pass\n \n def nextId(self):\n if self.wizard().get_wizard_path()==\"csv_import\":\n return Page_CsvImport\n else: #normal case\n return -1\n################################################################################ \nPage_Welcome, Page_DataType, Page_ChooseMetric, Page_OutcomeName, Page_CsvImport = range(5)\nclass MainWizard(QWizard):\n def __init__(self, parent=None, path=None, recent_datasets=[]):\n super(MainWizard, self).__init__(parent)\n \n self.info_d = {}\n self.info_d['path'] = path\n self.setPage(Page_Welcome, WelcomePage(recent_datasets=recent_datasets))\n self.setPage(Page_DataType, DataTypePage())\n self.setPage(Page_ChooseMetric, ChooseMetricPage())\n self.setPage(Page_OutcomeName, OutcomeNamePage())\n self.setPage(Page_CsvImport, CsvImportPage())\n \n if path is None:\n self.setStartId(Page_Welcome)\n self.setWindowTitle(\"Open Meta-Analyst\")\n elif path is \"csv_import\":\n self.setStartId(Page_DataType)\n self.setWindowTitle(\"Import a CSV\")\n elif path is \"new_dataset\":\n self.setStartId(Page_DataType)\n self.setWindowTitle(\"Create a new dataset\")\n \n\n #self.setPixmap(QtGui.QWizard.BannerPixmap,\n # QtGui.QPixmap(':/misc/meta.png'))\n #self.setPixmap(QtGui.QWizard.BackgroundPixmap,\n # QtGui.QPixmap(':/misc/meta.png'))\n \n # make the displayed size of the pages reasonable\n QObject.connect(self, SIGNAL(\"currentIdChanged(int)\"), self._change_size)\n \n def _change_size(self, pageid):\n self.adjustSize()\n \n def set_wizard_path(self, path):\n self.info_d['path']=path\n def get_wizard_path(self):\n if 'path' in self.info_d:\n return self.info_d['path']\n else:\n return None\n \n def set_dataset_info(self, outcome_info):\n self.info_d['outcome_info'] = outcome_info\n \n def get_dataset_info(self):\n if 'outcome_info' in self.info_d:\n return self.info_d['outcome_info']\n else:\n return None\n \n def set_selected_dataset(self, dataset):\n self.info_d['selected_dataset'] = dataset\n def get_selected_dataset(self):\n if 'selected_dataset' in self.info_d:\n return self.info_d['selected_dataset']\n else:\n return None\n \n def set_effect(self, effect_name):\n self.info_d['outcome_info']['effect'] = effect_name\n \n def get_effect(self):\n return self.info_d['outcome_info']['effect']\n \n def set_csv_data(self, csv_data):\n self.info_d['csv_data'] = csv_data\n \n def get_csv_data(self):\n if 'csv_data' in self.info_d:\n return self.info_d['csv_data']\n else:\n return None\n \n \n \n def get_results(self):\n information = {}\n 
information['path']=self.get_wizard_path()\n information['outcome_info']=self.get_dataset_info()\n # set outcome name\n if information['outcome_info'] is not None:\n information['outcome_info']['name']=self.field(\"outcomeName\").toString()\n information['selected_dataset'] = self.get_selected_dataset()\n information['csv_data'] = self.get_csv_data()\n \n print(\"Information from wizard: %s\" % str(information))\n return information\n \nif __name__ == '__main__':\n\n import sys\n\n app = QApplication(sys.argv)\n wizard = MainWizard()\n wizard.show()\n sys.exit(app.exec_())" }, { "alpha_fraction": 0.2700729966163635, "alphanum_fraction": 0.29197078943252563, "avg_line_length": 10.454545021057129, "blob_id": "01d6d4c21af8f88649f1597f796c15db92f14b8e", "content_id": "59427e29062acd9f0ec69419579fb79495ba6084", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 137, "license_type": "no_license", "max_line_length": 23, "num_lines": 11, "path": "/src/R/HSROC/R/REFSTD_1.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "REFSTD_1 <-\r\nfunction (rs, n, t) \r\n{\r\n if (rs[[1]] == 1) {\r\n x = t\r\n }\r\n else {\r\n x = n\r\n }\r\n return(x)\r\n}\r\n" }, { "alpha_fraction": 0.5180723071098328, "alphanum_fraction": 0.5301204919815063, "avg_line_length": 11.833333015441895, "blob_id": "5a93747e30748e16ac0f5bf380aaa0cd25e5b8b6", "content_id": "936545fd589e7f90a788fbd1694b00f9385f1a6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 83, "license_type": "no_license", "max_line_length": 21, "num_lines": 6, "path": "/src/R/HSROC/R/A.eij.fonction.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "A.eij.fonction <-\r\nfunction (a) \r\n{\r\n result = sum(a^2)\r\n return(result)\r\n}\r\n" }, { "alpha_fraction": 0.458781361579895, "alphanum_fraction": 0.48745518922805786, "avg_line_length": 19.461538314819336, "blob_id": "6f37a718de06491980392e36c9deba38c50c856a", "content_id": "71235d7a76fd46f3d04da1bef7ca9924e61740db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 279, "license_type": "no_license", "max_line_length": 64, "num_lines": 13, "path": "/src/R/HSROC/R/REFSTD_6_SP.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "REFSTD_6_SP <-\r\nfunction (refstd, N.refstd, A.Sp2, B.Sp2) \r\n{\r\n if (refstd == TRUE) {\r\n sp = 1\r\n }\r\n else {\r\n sp = rbeta(n = N.refstd, shape1 = A.Sp2, shape2 = B.Sp2)\r\n }\r\n results = list(sp)\r\n names(results) = list(\"SP\")\r\n return(results)\r\n}\r\n" }, { "alpha_fraction": 0.6646361947059631, "alphanum_fraction": 0.6955668330192566, "avg_line_length": 55.609195709228516, "blob_id": "59180c69e39f013992a185718b4347b5bce033fa", "content_id": "50aab32b347d0beb1423241f50ed8c22c817885c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14775, "license_type": "no_license", "max_line_length": 128, "num_lines": 261, "path": "/src/forms/ui_binary_data_form.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'binary_data_form2.ui'\n#\n# Created: Mon May 20 09:43:29 2013\n# by: PyQt4 UI code generator 4.10\n#\n# WARNING! 
All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_BinaryDataForm(object):\n def setupUi(self, BinaryDataForm):\n BinaryDataForm.setObjectName(_fromUtf8(\"BinaryDataForm\"))\n BinaryDataForm.resize(394, 382)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(_fromUtf8(\":/misc/meta.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n BinaryDataForm.setWindowIcon(icon)\n self.verticalLayout_4 = QtGui.QVBoxLayout(BinaryDataForm)\n self.verticalLayout_4.setObjectName(_fromUtf8(\"verticalLayout_4\"))\n self.verticalLayout_3 = QtGui.QVBoxLayout()\n self.verticalLayout_3.setObjectName(_fromUtf8(\"verticalLayout_3\"))\n self.gridLayout_5 = QtGui.QGridLayout()\n self.gridLayout_5.setContentsMargins(-1, -1, -1, 9)\n self.gridLayout_5.setVerticalSpacing(4)\n self.gridLayout_5.setObjectName(_fromUtf8(\"gridLayout_5\"))\n self.event_lbl_3 = QtGui.QLabel(BinaryDataForm)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.event_lbl_3.setFont(font)\n self.event_lbl_3.setAlignment(QtCore.Qt.AlignCenter)\n self.event_lbl_3.setObjectName(_fromUtf8(\"event_lbl_3\"))\n self.gridLayout_5.addWidget(self.event_lbl_3, 0, 1, 1, 1)\n self.label_18 = QtGui.QLabel(BinaryDataForm)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.label_18.setFont(font)\n self.label_18.setAlignment(QtCore.Qt.AlignCenter)\n self.label_18.setObjectName(_fromUtf8(\"label_18\"))\n self.gridLayout_5.addWidget(self.label_18, 0, 2, 1, 1)\n self.label_19 = QtGui.QLabel(BinaryDataForm)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n font.setBold(True)\n font.setWeight(75)\n self.label_19.setFont(font)\n self.label_19.setAlignment(QtCore.Qt.AlignCenter)\n self.label_19.setObjectName(_fromUtf8(\"label_19\"))\n self.gridLayout_5.addWidget(self.label_19, 0, 3, 1, 1)\n self.label_20 = QtGui.QLabel(BinaryDataForm)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.label_20.setFont(font)\n self.label_20.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing)\n self.label_20.setObjectName(_fromUtf8(\"label_20\"))\n self.gridLayout_5.addWidget(self.label_20, 1, 0, 1, 1)\n self.raw_data_table = QtGui.QTableWidget(BinaryDataForm)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.raw_data_table.sizePolicy().hasHeightForWidth())\n self.raw_data_table.setSizePolicy(sizePolicy)\n self.raw_data_table.setMinimumSize(QtCore.QSize(305, 93))\n self.raw_data_table.setMaximumSize(QtCore.QSize(305, 84))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.raw_data_table.setFont(font)\n self.raw_data_table.setFrameShadow(QtGui.QFrame.Plain)\n self.raw_data_table.setLineWidth(1)\n self.raw_data_table.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.raw_data_table.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.raw_data_table.setProperty(\"showDropIndicator\", True)\n self.raw_data_table.setAlternatingRowColors(True)\n 
self.raw_data_table.setShowGrid(True)\n self.raw_data_table.setGridStyle(QtCore.Qt.DashDotLine)\n self.raw_data_table.setRowCount(3)\n self.raw_data_table.setColumnCount(3)\n self.raw_data_table.setObjectName(_fromUtf8(\"raw_data_table\"))\n self.raw_data_table.horizontalHeader().setVisible(False)\n self.raw_data_table.horizontalHeader().setHighlightSections(False)\n self.raw_data_table.verticalHeader().setVisible(False)\n self.gridLayout_5.addWidget(self.raw_data_table, 1, 1, 3, 3)\n self.label_21 = QtGui.QLabel(BinaryDataForm)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.label_21.setFont(font)\n self.label_21.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n self.label_21.setObjectName(_fromUtf8(\"label_21\"))\n self.gridLayout_5.addWidget(self.label_21, 2, 0, 1, 1)\n self.label_22 = QtGui.QLabel(BinaryDataForm)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n font.setBold(True)\n font.setWeight(75)\n self.label_22.setFont(font)\n self.label_22.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n self.label_22.setObjectName(_fromUtf8(\"label_22\"))\n self.gridLayout_5.addWidget(self.label_22, 3, 0, 1, 1)\n self.verticalLayout_3.addLayout(self.gridLayout_5)\n self.verticalLayout_4.addLayout(self.verticalLayout_3)\n self.horizontalLayout_17 = QtGui.QHBoxLayout()\n self.horizontalLayout_17.setObjectName(_fromUtf8(\"horizontalLayout_17\"))\n self.clear_Btn = QtGui.QPushButton(BinaryDataForm)\n self.clear_Btn.setObjectName(_fromUtf8(\"clear_Btn\"))\n self.horizontalLayout_17.addWidget(self.clear_Btn)\n spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.horizontalLayout_17.addItem(spacerItem)\n self.back_calc_btn = QtGui.QPushButton(BinaryDataForm)\n self.back_calc_btn.setEnabled(False)\n self.back_calc_btn.setObjectName(_fromUtf8(\"back_calc_btn\"))\n self.horizontalLayout_17.addWidget(self.back_calc_btn)\n self.verticalLayout_4.addLayout(self.horizontalLayout_17)\n self.horizontalLayout_13 = QtGui.QHBoxLayout()\n self.horizontalLayout_13.setSpacing(4)\n self.horizontalLayout_13.setObjectName(_fromUtf8(\"horizontalLayout_13\"))\n self.label_17 = QtGui.QLabel(BinaryDataForm)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.label_17.setFont(font)\n self.label_17.setAlignment(QtCore.Qt.AlignCenter)\n self.label_17.setObjectName(_fromUtf8(\"label_17\"))\n self.horizontalLayout_13.addWidget(self.label_17)\n self.effect_cbo_box = QtGui.QComboBox(BinaryDataForm)\n self.effect_cbo_box.setMinimumSize(QtCore.QSize(76, 20))\n self.effect_cbo_box.setMaximumSize(QtCore.QSize(76, 20))\n self.effect_cbo_box.setObjectName(_fromUtf8(\"effect_cbo_box\"))\n self.horizontalLayout_13.addWidget(self.effect_cbo_box)\n spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.horizontalLayout_13.addItem(spacerItem1)\n self.verticalLayout_4.addLayout(self.horizontalLayout_13)\n self.groupBox = QtGui.QGroupBox(BinaryDataForm)\n self.groupBox.setTitle(_fromUtf8(\"\"))\n self.groupBox.setObjectName(_fromUtf8(\"groupBox\"))\n self.gridLayout_6 = QtGui.QGridLayout(self.groupBox)\n self.gridLayout_6.setObjectName(_fromUtf8(\"gridLayout_6\"))\n spacerItem2 = QtGui.QSpacerItem(32, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.gridLayout_6.addItem(spacerItem2, 0, 0, 2, 1)\n self.ci_label = QtGui.QLabel(self.groupBox)\n self.ci_label.setObjectName(_fromUtf8(\"ci_label\"))\n 
self.gridLayout_6.addWidget(self.ci_label, 0, 2, 1, 1)\n spacerItem3 = QtGui.QSpacerItem(32, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.gridLayout_6.addItem(spacerItem3, 0, 3, 2, 1)\n self.horizontalLayout_15 = QtGui.QHBoxLayout()\n self.horizontalLayout_15.setObjectName(_fromUtf8(\"horizontalLayout_15\"))\n self.est_lbl = QtGui.QLabel(self.groupBox)\n self.est_lbl.setMinimumSize(QtCore.QSize(0, 20))\n self.est_lbl.setMaximumSize(QtCore.QSize(16777215, 20))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.est_lbl.setFont(font)\n self.est_lbl.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft)\n self.est_lbl.setObjectName(_fromUtf8(\"est_lbl\"))\n self.horizontalLayout_15.addWidget(self.est_lbl)\n self.effect_txt_box = QtGui.QLineEdit(self.groupBox)\n self.effect_txt_box.setMinimumSize(QtCore.QSize(50, 20))\n self.effect_txt_box.setMaximumSize(QtCore.QSize(50, 20))\n self.effect_txt_box.setObjectName(_fromUtf8(\"effect_txt_box\"))\n self.horizontalLayout_15.addWidget(self.effect_txt_box)\n self.gridLayout_6.addLayout(self.horizontalLayout_15, 1, 1, 1, 1)\n self.horizontalLayout_16 = QtGui.QHBoxLayout()\n self.horizontalLayout_16.setObjectName(_fromUtf8(\"horizontalLayout_16\"))\n self.low_lbl = QtGui.QLabel(self.groupBox)\n self.low_lbl.setMaximumSize(QtCore.QSize(16777215, 20))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.low_lbl.setFont(font)\n self.low_lbl.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft)\n self.low_lbl.setObjectName(_fromUtf8(\"low_lbl\"))\n self.horizontalLayout_16.addWidget(self.low_lbl)\n self.low_txt_box = QtGui.QLineEdit(self.groupBox)\n self.low_txt_box.setMinimumSize(QtCore.QSize(50, 20))\n self.low_txt_box.setMaximumSize(QtCore.QSize(50, 20))\n self.low_txt_box.setObjectName(_fromUtf8(\"low_txt_box\"))\n self.horizontalLayout_16.addWidget(self.low_txt_box)\n self.label_23 = QtGui.QLabel(self.groupBox)\n self.label_23.setObjectName(_fromUtf8(\"label_23\"))\n self.horizontalLayout_16.addWidget(self.label_23)\n self.high_txt_box = QtGui.QLineEdit(self.groupBox)\n self.high_txt_box.setMinimumSize(QtCore.QSize(50, 20))\n self.high_txt_box.setMaximumSize(QtCore.QSize(50, 20))\n self.high_txt_box.setObjectName(_fromUtf8(\"high_txt_box\"))\n self.horizontalLayout_16.addWidget(self.high_txt_box)\n self.high_lbl = QtGui.QLabel(self.groupBox)\n self.high_lbl.setMaximumSize(QtCore.QSize(16777215, 20))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.high_lbl.setFont(font)\n self.high_lbl.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft)\n self.high_lbl.setObjectName(_fromUtf8(\"high_lbl\"))\n self.horizontalLayout_16.addWidget(self.high_lbl)\n self.gridLayout_6.addLayout(self.horizontalLayout_16, 1, 2, 1, 1)\n self.verticalLayout_4.addWidget(self.groupBox)\n self.line = QtGui.QFrame(BinaryDataForm)\n self.line.setFrameShape(QtGui.QFrame.HLine)\n self.line.setFrameShadow(QtGui.QFrame.Sunken)\n self.line.setObjectName(_fromUtf8(\"line\"))\n self.verticalLayout_4.addWidget(self.line)\n self.horizontalLayout_14 = QtGui.QHBoxLayout()\n self.horizontalLayout_14.setObjectName(_fromUtf8(\"horizontalLayout_14\"))\n self.inconsistencyLabel = QtGui.QLabel(BinaryDataForm)\n font = QtGui.QFont()\n font.setBold(True)\n font.setWeight(75)\n self.inconsistencyLabel.setFont(font)\n self.inconsistencyLabel.setObjectName(_fromUtf8(\"inconsistencyLabel\"))\n 
self.horizontalLayout_14.addWidget(self.inconsistencyLabel)\n spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.horizontalLayout_14.addItem(spacerItem4)\n self.buttonBox = QtGui.QDialogButtonBox(BinaryDataForm)\n self.buttonBox.setOrientation(QtCore.Qt.Horizontal)\n self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)\n self.buttonBox.setObjectName(_fromUtf8(\"buttonBox\"))\n self.horizontalLayout_14.addWidget(self.buttonBox)\n self.verticalLayout_4.addLayout(self.horizontalLayout_14)\n\n self.retranslateUi(BinaryDataForm)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\"rejected()\")), BinaryDataForm.reject)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\"accepted()\")), BinaryDataForm.accept)\n QtCore.QMetaObject.connectSlotsByName(BinaryDataForm)\n BinaryDataForm.setTabOrder(self.raw_data_table, self.clear_Btn)\n BinaryDataForm.setTabOrder(self.clear_Btn, self.back_calc_btn)\n BinaryDataForm.setTabOrder(self.back_calc_btn, self.effect_cbo_box)\n BinaryDataForm.setTabOrder(self.effect_cbo_box, self.effect_txt_box)\n BinaryDataForm.setTabOrder(self.effect_txt_box, self.low_txt_box)\n BinaryDataForm.setTabOrder(self.low_txt_box, self.high_txt_box)\n BinaryDataForm.setTabOrder(self.high_txt_box, self.buttonBox)\n\n def retranslateUi(self, BinaryDataForm):\n BinaryDataForm.setWindowTitle(_translate(\"BinaryDataForm\", \"Binary Data\", None))\n self.event_lbl_3.setText(_translate(\"BinaryDataForm\", \"event\", None))\n self.label_18.setText(_translate(\"BinaryDataForm\", \"no event\", None))\n self.label_19.setText(_translate(\"BinaryDataForm\", \"total\", None))\n self.label_20.setText(_translate(\"BinaryDataForm\", \"group 1\", None))\n self.label_21.setText(_translate(\"BinaryDataForm\", \"group 2\", None))\n self.label_22.setText(_translate(\"BinaryDataForm\", \"total\", None))\n self.clear_Btn.setText(_translate(\"BinaryDataForm\", \"Clear Form\", None))\n self.back_calc_btn.setText(_translate(\"BinaryDataForm\", \"back-calculate table\", None))\n self.label_17.setText(_translate(\"BinaryDataForm\", \"effect\", None))\n self.ci_label.setToolTip(_translate(\"BinaryDataForm\", \"Use the box to the left to set the % confidence interval\", None))\n self.ci_label.setText(_translate(\"BinaryDataForm\", \"X% Confidence Interval\", None))\n self.est_lbl.setText(_translate(\"BinaryDataForm\", \"est.\", None))\n self.low_lbl.setText(_translate(\"BinaryDataForm\", \"[\", None))\n self.label_23.setText(_translate(\"BinaryDataForm\", \",\", None))\n self.high_lbl.setText(_translate(\"BinaryDataForm\", \"]\", None))\n self.inconsistencyLabel.setText(_translate(\"BinaryDataForm\", \"INCONSISTENT FORM\", None))\n\nimport icons_rc\n" }, { "alpha_fraction": 0.5796545147895813, "alphanum_fraction": 0.5815739035606384, "avg_line_length": 30.0625, "blob_id": "a7e14ee4a3bbea6ff676abbc101d3ea71638e561", "content_id": "fbe05a9598d50267c653bd16936c7c23ea6b47c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 521, "license_type": "no_license", "max_line_length": 58, "num_lines": 16, "path": "/src/easter_egg.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "from PyQt4.Qt import QDialog\nimport forms.ui_tom_form\n\nclass TomDialog(QDialog, forms.ui_tom_form.Ui_Dialog):\n def __init__(self, parent=None):\n super(TomDialog, self).__init__(parent)\n self.setupUi(self)\n \n#class PersonDialog(QDialog, 
forms.ui_tom_form.Ui_Dialog):\n# def __init__(self, parent=None, person=\"tom\"):\n# super(PersonDialog, self).__init__(parent)\n# self.setupUi(self)\n# \n# personPixmap = \n# \n# self.label.setPixmap\n \n \n \n \n " }, { "alpha_fraction": 0.6634615659713745, "alphanum_fraction": 0.6873827576637268, "avg_line_length": 45.85714340209961, "blob_id": "8dcd04823710267f3a76dbcf9f8d785ad641f4b0", "content_id": "e0ca9c4ff21d1f0d5615e2672f5c3ca355bb5647", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4264, "license_type": "no_license", "max_line_length": 115, "num_lines": 91, "path": "/src/forms/ui_new_covariate.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'new_covariate_dlg.ui'\n#\n# Created: Wed Apr 17 14:37:20 2013\n# by: PyQt4 UI code generator 4.10\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_new_covariate_dialog(object):\n def setupUi(self, new_covariate_dialog):\n new_covariate_dialog.setObjectName(_fromUtf8(\"new_covariate_dialog\"))\n new_covariate_dialog.setEnabled(True)\n new_covariate_dialog.resize(301, 132)\n new_covariate_dialog.setMinimumSize(QtCore.QSize(301, 132))\n new_covariate_dialog.setMaximumSize(QtCore.QSize(301, 132))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n new_covariate_dialog.setFont(font)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(_fromUtf8(\":/images/meta.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n new_covariate_dialog.setWindowIcon(icon)\n self.buttonBox = QtGui.QDialogButtonBox(new_covariate_dialog)\n self.buttonBox.setGeometry(QtCore.QRect(10, 90, 281, 32))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.buttonBox.setFont(font)\n self.buttonBox.setOrientation(QtCore.Qt.Horizontal)\n self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)\n self.buttonBox.setObjectName(_fromUtf8(\"buttonBox\"))\n self.layoutWidget = QtGui.QWidget(new_covariate_dialog)\n self.layoutWidget.setGeometry(QtCore.QRect(10, 10, 281, 71))\n self.layoutWidget.setObjectName(_fromUtf8(\"layoutWidget\"))\n self.gridLayout = QtGui.QGridLayout(self.layoutWidget)\n self.gridLayout.setMargin(0)\n self.gridLayout.setObjectName(_fromUtf8(\"gridLayout\"))\n self.label_2 = QtGui.QLabel(self.layoutWidget)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n font.setPointSize(10)\n self.label_2.setFont(font)\n self.label_2.setObjectName(_fromUtf8(\"label_2\"))\n self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)\n self.covariate_name_le = QtGui.QLineEdit(self.layoutWidget)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.covariate_name_le.setFont(font)\n self.covariate_name_le.setAlignment(QtCore.Qt.AlignCenter)\n self.covariate_name_le.setObjectName(_fromUtf8(\"covariate_name_le\"))\n self.gridLayout.addWidget(self.covariate_name_le, 0, 1, 1, 1)\n self.label = QtGui.QLabel(self.layoutWidget)\n font = QtGui.QFont()\n 
font.setFamily(_fromUtf8(\"Verdana\"))\n font.setPointSize(10)\n self.label.setFont(font)\n self.label.setObjectName(_fromUtf8(\"label\"))\n self.gridLayout.addWidget(self.label, 1, 0, 1, 1)\n self.datatype_cbo_box = QtGui.QComboBox(self.layoutWidget)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.datatype_cbo_box.setFont(font)\n self.datatype_cbo_box.setObjectName(_fromUtf8(\"datatype_cbo_box\"))\n self.gridLayout.addWidget(self.datatype_cbo_box, 1, 1, 1, 1)\n\n self.retranslateUi(new_covariate_dialog)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\"accepted()\")), new_covariate_dialog.accept)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\"rejected()\")), new_covariate_dialog.reject)\n QtCore.QMetaObject.connectSlotsByName(new_covariate_dialog)\n\n def retranslateUi(self, new_covariate_dialog):\n new_covariate_dialog.setWindowTitle(_translate(\"new_covariate_dialog\", \"add new covariate\", None))\n self.label_2.setText(_translate(\"new_covariate_dialog\", \"covariate name:\", None))\n self.label.setText(_translate(\"new_covariate_dialog\", \"type of covariate:\", None))\n\nimport icons_rc\n" }, { "alpha_fraction": 0.6957862377166748, "alphanum_fraction": 0.7132579684257507, "avg_line_length": 55.90058517456055, "blob_id": "472b11de631a14d1a1f1d2e4507b47111f2ae6d5", "content_id": "a332fad1f0c463263fbda4669222457ead4e7bb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9730, "license_type": "no_license", "max_line_length": 226, "num_lines": 171, "path": "/src/forms/ui_csv_import_page.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'csv_import_page.ui'\n#\n# Created: Thu Jun 27 10:21:34 2013\n# by: PyQt4 UI code generator 4.10.1\n#\n# WARNING! 
All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_WizardPage(object):\n def setupUi(self, WizardPage):\n WizardPage.setObjectName(_fromUtf8(\"WizardPage\"))\n WizardPage.resize(646, 630)\n WizardPage.setMinimumSize(QtCore.QSize(500, 630))\n self.verticalLayout_2 = QtGui.QVBoxLayout(WizardPage)\n self.verticalLayout_2.setObjectName(_fromUtf8(\"verticalLayout_2\"))\n self.instructions = QtGui.QLabel(WizardPage)\n self.instructions.setWordWrap(True)\n self.instructions.setObjectName(_fromUtf8(\"instructions\"))\n self.verticalLayout_2.addWidget(self.instructions)\n self.groupBox = QtGui.QGroupBox(WizardPage)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Minimum)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())\n self.groupBox.setSizePolicy(sizePolicy)\n self.groupBox.setMinimumSize(QtCore.QSize(0, 200))\n self.groupBox.setObjectName(_fromUtf8(\"groupBox\"))\n self.verticalLayout = QtGui.QVBoxLayout(self.groupBox)\n self.verticalLayout.setObjectName(_fromUtf8(\"verticalLayout\"))\n self.from_excel_chkbx = QtGui.QCheckBox(self.groupBox)\n self.from_excel_chkbx.setObjectName(_fromUtf8(\"from_excel_chkbx\"))\n self.verticalLayout.addWidget(self.from_excel_chkbx)\n self.has_headers_chkbx = QtGui.QCheckBox(self.groupBox)\n self.has_headers_chkbx.setChecked(True)\n self.has_headers_chkbx.setObjectName(_fromUtf8(\"has_headers_chkbx\"))\n self.verticalLayout.addWidget(self.has_headers_chkbx)\n self.horizontalLayout = QtGui.QHBoxLayout()\n self.horizontalLayout.setObjectName(_fromUtf8(\"horizontalLayout\"))\n self.delimter_lbl = QtGui.QLabel(self.groupBox)\n self.delimter_lbl.setObjectName(_fromUtf8(\"delimter_lbl\"))\n self.horizontalLayout.addWidget(self.delimter_lbl)\n self.delimter_le = QtGui.QLineEdit(self.groupBox)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.delimter_le.sizePolicy().hasHeightForWidth())\n self.delimter_le.setSizePolicy(sizePolicy)\n self.delimter_le.setMaximumSize(QtCore.QSize(20, 16777215))\n self.delimter_le.setObjectName(_fromUtf8(\"delimter_le\"))\n self.horizontalLayout.addWidget(self.delimter_le)\n spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.horizontalLayout.addItem(spacerItem)\n self.verticalLayout.addLayout(self.horizontalLayout)\n self.horizontalLayout_3 = QtGui.QHBoxLayout()\n self.horizontalLayout_3.setObjectName(_fromUtf8(\"horizontalLayout_3\"))\n self.label_2 = QtGui.QLabel(self.groupBox)\n self.label_2.setObjectName(_fromUtf8(\"label_2\"))\n self.horizontalLayout_3.addWidget(self.label_2)\n self.quotechar_le = QtGui.QLineEdit(self.groupBox)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.quotechar_le.sizePolicy().hasHeightForWidth())\n 
self.quotechar_le.setSizePolicy(sizePolicy)\n self.quotechar_le.setMaximumSize(QtCore.QSize(20, 16777215))\n self.quotechar_le.setObjectName(_fromUtf8(\"quotechar_le\"))\n self.horizontalLayout_3.addWidget(self.quotechar_le)\n spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.horizontalLayout_3.addItem(spacerItem1)\n self.verticalLayout.addLayout(self.horizontalLayout_3)\n self.horizontalLayout_2 = QtGui.QHBoxLayout()\n self.horizontalLayout_2.setObjectName(_fromUtf8(\"horizontalLayout_2\"))\n self.file_path_lbl = QtGui.QLabel(self.groupBox)\n self.file_path_lbl.setObjectName(_fromUtf8(\"file_path_lbl\"))\n self.horizontalLayout_2.addWidget(self.file_path_lbl)\n self.select_file_btn = QtGui.QPushButton(self.groupBox)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(_fromUtf8(\":/function_icon_set/function_icon_set/folder_48.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.select_file_btn.setIcon(icon)\n self.select_file_btn.setObjectName(_fromUtf8(\"select_file_btn\"))\n self.horizontalLayout_2.addWidget(self.select_file_btn)\n self.verticalLayout.addLayout(self.horizontalLayout_2)\n self.verticalLayout_2.addWidget(self.groupBox)\n self.groupBox_2 = QtGui.QGroupBox(WizardPage)\n self.groupBox_2.setMinimumSize(QtCore.QSize(0, 120))\n self.groupBox_2.setObjectName(_fromUtf8(\"groupBox_2\"))\n self.horizontalLayout_4 = QtGui.QHBoxLayout(self.groupBox_2)\n self.horizontalLayout_4.setObjectName(_fromUtf8(\"horizontalLayout_4\"))\n self.required_fmt_table = QtGui.QTableWidget(self.groupBox_2)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.required_fmt_table.sizePolicy().hasHeightForWidth())\n self.required_fmt_table.setSizePolicy(sizePolicy)\n self.required_fmt_table.setMaximumSize(QtCore.QSize(16777215, 70))\n font = QtGui.QFont()\n font.setPointSize(8)\n font.setBold(True)\n font.setWeight(75)\n self.required_fmt_table.setFont(font)\n self.required_fmt_table.setObjectName(_fromUtf8(\"required_fmt_table\"))\n self.required_fmt_table.setColumnCount(0)\n self.required_fmt_table.setRowCount(0)\n self.horizontalLayout_4.addWidget(self.required_fmt_table)\n self.verticalLayout_2.addWidget(self.groupBox_2)\n self.preview_grp_box = QtGui.QGroupBox(WizardPage)\n self.preview_grp_box.setObjectName(_fromUtf8(\"preview_grp_box\"))\n self.horizontalLayout_5 = QtGui.QHBoxLayout(self.preview_grp_box)\n self.horizontalLayout_5.setObjectName(_fromUtf8(\"horizontalLayout_5\"))\n self.preview_table = QtGui.QTableWidget(self.preview_grp_box)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.preview_table.sizePolicy().hasHeightForWidth())\n self.preview_table.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setPointSize(8)\n font.setBold(True)\n font.setWeight(75)\n self.preview_table.setFont(font)\n self.preview_table.setObjectName(_fromUtf8(\"preview_table\"))\n self.preview_table.setColumnCount(0)\n self.preview_table.setRowCount(0)\n self.horizontalLayout_5.addWidget(self.preview_table)\n self.verticalLayout_2.addWidget(self.preview_grp_box)\n self.delimter_lbl.setBuddy(self.delimter_le)\n self.label_2.setBuddy(self.delimter_le)\n self.file_path_lbl.setBuddy(self.select_file_btn)\n\n self.retranslateUi(WizardPage)\n 
QtCore.QMetaObject.connectSlotsByName(WizardPage)\n\n def retranslateUi(self, WizardPage):\n WizardPage.setWindowTitle(_translate(\"WizardPage\", \"WizardPage\", None))\n WizardPage.setTitle(_translate(\"WizardPage\", \"Choose CSV file\", None))\n self.instructions.setText(_translate(\"WizardPage\", \"Please select a csv file to import:\\n\"\n\"The CSV should match the format of the spreadsheet currently displayed under \\\"required CSV format\\\". Don\\'t worry if the column titles are not the same, these can be changed later.\\n\"\n\"\\n\"\n\"Also, things will be OK if you don\\'t have data for all the columns, however if you have data for a column to the right of a column for which you do not have data, the left hand column(s) must exist (but have blank cells).\\n\"\n\"\\n\"\n\"Additional columns will be treated as covariates.\", None))\n self.groupBox.setTitle(_translate(\"WizardPage\", \"Import Options\", None))\n self.from_excel_chkbx.setText(_translate(\"WizardPage\", \"csv exported from excel?\", None))\n self.has_headers_chkbx.setText(_translate(\"WizardPage\", \"Has column labels?\", None))\n self.delimter_lbl.setText(_translate(\"WizardPage\", \"Delimter:\", None))\n self.delimter_le.setText(_translate(\"WizardPage\", \",\", None))\n self.label_2.setText(_translate(\"WizardPage\", \"Quote Character:\", None))\n self.quotechar_le.setText(_translate(\"WizardPage\", \"\\\"\", None))\n self.file_path_lbl.setText(_translate(\"WizardPage\", \"No file has been chosen.\", None))\n self.select_file_btn.setText(_translate(\"WizardPage\", \"select csv file ...\", None))\n self.groupBox_2.setTitle(_translate(\"WizardPage\", \"Required CSV format\", None))\n self.preview_grp_box.setTitle(_translate(\"WizardPage\", \"Preview of imported data\", None))\n\nimport icons_rc\n" }, { "alpha_fraction": 0.5790408253669739, "alphanum_fraction": 0.5825932621955872, "avg_line_length": 36, "blob_id": "18c881d2b8f2ad79a79ffbf7e590b7550e22eaad", "content_id": "1f279816a481066ccfcd4733eb2605ad00a613f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 563, "license_type": "no_license", "max_line_length": 103, "num_lines": 15, "path": "/src/new_outcome_form.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "from PyQt4.Qt import *\nimport forms.ui_new_outcome\n\nclass AddNewOutcomeForm(QDialog, forms.ui_new_outcome.Ui_Dialog):\n \n def __init__(self, parent=None):\n super(AddNewOutcomeForm, self).__init__(parent)\n self.setupUi(self)\n self._populate_combo_box()\n\n \n def _populate_combo_box(self):\n for name, item_id in zip([QString(s) for s in [\"Binary\", \"Continuous\", \"Diagnostic\", \"Other\"]],\n [QVariant(i) for i in range(4)]):\n self.datatype_cbo_box.addItem(name, item_id)\n " }, { "alpha_fraction": 0.5301937460899353, "alphanum_fraction": 0.5349115133285522, "avg_line_length": 45.749019622802734, "blob_id": "68c050f76491644572e1fd57e53e952946136a39", "content_id": "60a357ce3ee02db7699b4b01b14719fe9b5d4dbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 47692, "license_type": "no_license", "max_line_length": 300, "num_lines": 1020, "path": "/src/continuous_data_form.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "##################################################\n#\n# Byron C. 
Wallace\n# George Dietz\n# OpenMeta[analyst]\n# ---\n# Continuous data form module; for flexible entry of continuous\n# outcome data.\n#\n# TODO there is some redundancy here with binary_data_form\n# should probably refactor\n# \n# Note that we don't make use of the table/custom model\n# design here. Rather, we edit the ma_unit object\n# directly, based on what the user inputs. This seemed a more\n# straightforward approach, because the table itself displays\n# many fields that do not ultimately belong in the raw_data --\n# it's mostly imputation going on here.\n#\n##################################################\n\n#import pdb\nimport sys\nimport copy\n\nfrom PyQt4.Qt import *\nfrom functools import partial\nimport calculator_routines as calc_fncs\n\nimport meta_py_r\nfrom meta_globals import *\nimport forms.ui_continuous_data_form\nimport forms.ui_choose_back_calc_result_form\n\ndefault_col_width = 65\n\n# because the output from R is a string (\"TRUE\"/\"FALSE\")\n# Remove this? GD\n_is_true = lambda x: x == \"TRUE\"\n\ndef is_list(x):\n try:\n list(x)\n return True\n except:\n return False\n\nclass ContinuousDataForm(QDialog, forms.ui_continuous_data_form.Ui_ContinuousDataForm):\n def __init__(self, ma_unit, cur_txs, cur_group_str, cur_effect, conf_level=None, parent=None):\n super(ContinuousDataForm, self).__init__(parent)\n self.setupUi(self)\n self.setup_signals_and_slots()\n \n if conf_level is None:\n QMessageBox.critical(self, \"insufficient arguments\", \"Confidence interval must be specified\")\n raise ValueError(\"Confidence interval must be specified\")\n self.conf_level = conf_level\n self.mult = meta_py_r.get_mult_from_r(self.conf_level)\n \n self.ma_unit = ma_unit\n self.cur_groups = cur_txs\n self.cur_effect = cur_effect\n self.group_str = cur_group_str\n self.metric_parameter = None\n self.entry_widgets = [self.simple_table, self.g1_pre_post_table,\n self.g2_pre_post_table, self.effect_txt_box,\n self.low_txt_box, self.high_txt_box,\n self.correlation_pre_post]\n self.text_boxes = [self.low_txt_box, self.high_txt_box,\n self.effect_txt_box, self.correlation_pre_post]\n self.ci_label.setText(\"{0:.1f}% Confidence Interval\".format(self.conf_level))\n self.current_item_data = {}\n \n # Set the table headers to reflect the group names\n groups_names = QStringList(self.cur_groups)\n self.simple_table.setVerticalHeaderLabels(groups_names)\n \n self.tables = [self.simple_table, self.g1_pre_post_table, self.g2_pre_post_table]\n for table in self.tables:\n self._set_col_widths(table)\n \n self.grp_1_lbl.setText(QString(self.cur_groups[0]))\n self.grp_2_lbl.setText(QString(self.cur_groups[1]))\n \n self.setup_clear_button_palettes() # Color for clear_button_pallette\n self.initialize_form() # initialize cells to empty items \n self.undoStack = QUndoStack(self)\n \n self.update_raw_data()\n self._populate_effect_data()\n self.set_current_effect()\n self.impute_data()\n self.enable_back_calculation_btn()\n \n print(\"current effect: %s\" % str(self.cur_effect))\n # Hide pre-post for SMD until it is implemented\n if self.cur_effect not in [\"MD\",\"SMD\"]:\n self.grp_box_pre_post.setVisible(False)\n self.adjustSize()\n \n self.current_correlation = self._get_correlation_str()\n \n def initialize_form(self, table=None):\n ''' Initialize all cells to empty items\n If table is specified, only clear that table, leave the others alone'''\n \n if table is None:\n for row in range(2):\n for col in range(self.simple_table.columnCount()):\n self._set_val(row, col, None)\n 
self._set_val(row, col, None, self.g1_pre_post_table)\n self._set_val(row, col, None, self.g2_pre_post_table)\n else:\n for row in range(2):\n for col in range(self.table.columnCount()):\n self._set_val(row, col, None, table)\n \n for txt_box in self.text_boxes:\n txt_box.setText(QString(\"\"))\n if txt_box == self.correlation_pre_post:\n txt_box.setText(QString(\"0.0\"))\n \n def setup_signals_and_slots(self):\n QObject.connect(self.simple_table, SIGNAL(\"cellChanged (int, int)\"), self._cell_changed)\n QObject.connect(self.g1_pre_post_table, SIGNAL(\"cellChanged (int, int)\"), lambda row,col: self.impute_pre_post_data(self.g1_pre_post_table, 0, row, col))\n QObject.connect(self.g2_pre_post_table, SIGNAL(\"cellChanged (int, int)\"), lambda row,col: self.impute_pre_post_data(self.g2_pre_post_table, 1, row, col))\n \n QObject.connect(self.effect_cbo_box, SIGNAL(\"currentIndexChanged(QString)\"), self.effect_changed)\n QObject.connect(self.clear_Btn, SIGNAL(\"clicked()\"), self.clear_form)\n QObject.connect(self.back_calc_btn, SIGNAL(\"clicked()\"), lambda: self.enable_back_calculation_btn(engage=True) )\n \n QObject.connect(self.effect_txt_box, SIGNAL(\"editingFinished()\"), lambda: self.val_changed(\"est\"))\n QObject.connect(self.low_txt_box, SIGNAL(\"editingFinished()\"), lambda: self.val_changed(\"lower\"))\n QObject.connect(self.high_txt_box, SIGNAL(\"editingFinished()\"), lambda: self.val_changed(\"upper\"))\n QObject.connect(self.correlation_pre_post, SIGNAL(\"editingFinished()\"), lambda: self.val_changed(\"correlation_pre_post\")) \n \n # Add undo/redo actions\n undo = QAction(self)\n redo = QAction(self)\n undo.setShortcut(QKeySequence.Undo)\n redo.setShortcut(QKeySequence.Redo)\n self.addAction(undo)\n self.addAction(redo)\n QObject.connect(undo, SIGNAL(\"triggered()\"), self.undo)\n QObject.connect(redo, SIGNAL(\"triggered()\"), self.redo)\n \n \n def _set_col_widths(self, table):\n for column in range(table.columnCount()):\n table.setColumnWidth(column, default_col_width)\n \n def _populate_effect_data(self):\n effect_names = self.ma_unit.get_effect_names()\n q_effects = sorted([QString(effect_str) for effect_str in effect_names])\n self.effect_cbo_box.blockSignals(True)\n self.effect_cbo_box.addItems(q_effects)\n self.effect_cbo_box.blockSignals(False)\n self.effect_cbo_box.setCurrentIndex(q_effects.index(QString(self.cur_effect)))\n \n def effect_changed(self):\n self.cur_effect = unicode(self.effect_cbo_box.currentText().toUtf8(), \"utf-8\")\n \n # hide pre-post for SMD\n if self.cur_effect not in [\"MD\",\"SMD\"]:\n self.grp_box_pre_post.setVisible(False)\n self.adjustSize()\n else:\n self.grp_box_pre_post.setVisible(True)\n self.adjustSize()\n \n self.group_str = self.get_cur_group_str()\n\n self.try_to_update_cur_outcome()\n self.set_current_effect()\n self.enable_txt_box_input()\n \n self.metric_parameter = None # zusammen\n self.enable_back_calculation_btn() # zusammen\n \n \n def _text_box_value_is_between_bounds(self, val_str, new_text):\n display_scale_val = \"\"\n \n get_disp_scale_val_if_valid = partial(\n calc_fncs.evaluate, new_text=new_text, ma_unit=self.ma_unit,\n curr_effect=self.cur_effect, group_str=self.group_str,\n conv_to_disp_scale = partial(meta_py_r.continuous_convert_scale,\n metric_name=self.cur_effect,\n convert_to=\"display.scale\"),\n parent=self, mult=self.mult)\n\n calc_fncs.block_signals(self.entry_widgets, True)\n try:\n if val_str == \"est\" and not is_empty(new_text):\n display_scale_val = get_disp_scale_val_if_valid(ci_param='est')\n elif 
val_str == \"lower\" and not is_empty(new_text):\n display_scale_val = get_disp_scale_val_if_valid(ci_param='low')\n elif val_str == \"upper\" and not is_empty(new_text):\n display_scale_val = get_disp_scale_val_if_valid(ci_param='high')\n elif val_str == \"correlation_pre_post\" and not is_empty(new_text):\n get_disp_scale_val_if_valid(opt_cmp_fn = lambda x: -1<=float(x)<=1,\n opt_cmp_msg=\"Correlation must be between -1 and +1\")\n except:\n calc_fncs.block_signals(self.entry_widgets, False)\n return False, False\n calc_fncs.block_signals(self.entry_widgets, False)\n print(\"Val_str: %s\" % val_str)\n return True,display_scale_val\n \n def _get_txt_from_val_str(self, val_str):\n if val_str == \"est\":\n return str(self.effect_txt_box.text())\n elif val_str == \"lower\":\n return str(self.low_txt_box.text())\n elif val_str == \"upper\":\n return str(self.high_txt_box.text())\n elif val_str == \"correlation_pre_post\":\n return str(self.correlation_pre_post.text())\n return None # should never happen\n \n def val_changed(self, val_str):\n # Backup form state\n old_ma_unit, old_tables_data = self._save_ma_unit_and_table_states(\n tables = [self.simple_table,\n self.g1_pre_post_table,\n self.g2_pre_post_table],\n ma_unit = self.ma_unit, \n use_old_value=False)\n old_correlation = self.current_correlation\n \n new_text = self._get_txt_from_val_str(val_str)\n \n \n no_errors, display_scale_val = self._text_box_value_is_between_bounds(val_str, new_text)\n if no_errors is False:\n print(\"There was an error while in val_changed\")\n self.restore_ma_unit_and_tables(old_ma_unit,old_tables_data, old_correlation)\n calc_fncs.block_signals(self.entry_widgets, True)\n if val_str == \"est\":\n self.effect_txt_box.setFocus()\n elif val_str == \"lower\":\n self.low_txt_box.setFocus()\n elif val_str == \"upper\":\n self.high_txt_box.setFocus()\n elif val_str == \"correlation_pre_post\":\n self.correlation_pre_post.setFocus()\n calc_fncs.block_signals(self.entry_widgets, False)\n return\n \n # If we got to this point it means everything is ok so far\n try:\n if display_scale_val not in EMPTY_VALS:\n display_scale_val = float(display_scale_val)\n else:\n display_scale_val = None\n except ValueError:\n # a number wasn't entered; ignore\n # should probably clear out the box here, too.\n print \"fail.\"\n return None\n \n calc_scale_val = meta_py_r.continuous_convert_scale(display_scale_val,\n self.cur_effect, convert_to=\"calc.scale\")\n \n if val_str == \"est\":\n self.ma_unit.set_effect(self.cur_effect, self.group_str, calc_scale_val)\n elif val_str == \"lower\":\n self.ma_unit.set_lower(self.cur_effect, self.group_str, calc_scale_val)\n elif val_str == \"upper\":\n self.ma_unit.set_upper(self.cur_effect, self.group_str, calc_scale_val)\n elif val_str == \"correlation_pre_post\":\n print \"ok -- correlation set to %s\" % self.correlation_pre_post.text()\n # Recompute the estimates\n self.impute_pre_post_data(self.g1_pre_post_table, 0)\n self.impute_pre_post_data(self.g2_pre_post_table, 1)\n \n self.impute_data() #### experimental\n \n\n new_ma_unit, new_tables_data = self._save_ma_unit_and_table_states(\n tables = [self.simple_table,\n self.g1_pre_post_table,\n self.g2_pre_post_table],\n ma_unit = self.ma_unit, \n use_old_value=False)\n new_correlation = self._get_correlation_str()\n restore_old_f = lambda: self.restore_ma_unit_and_tables(old_ma_unit, old_tables_data, old_correlation)\n restore_new_f = lambda: self.restore_ma_unit_and_tables(new_ma_unit, new_tables_data, new_correlation)\n command = 
calc_fncs.CommandFieldChanged(restore_new_f=restore_new_f,\n restore_old_f=restore_old_f,\n parent=self)\n self.undoStack.push(command)\n \n self.current_correlation = new_correlation\n \n def setup_clear_button_palettes(self):\n # Color for clear_button_pallette\n self.orig_palette = self.clear_Btn.palette()\n self.pushme_palette = QPalette()\n self.pushme_palette.setColor(QPalette.ButtonText,Qt.red)\n self.set_clear_btn_color()\n \n def set_clear_btn_color(self):\n if calc_fncs._input_fields_disabled(self.simple_table, [self.effect_txt_box, self.low_txt_box, self.high_txt_box]):\n self.clear_Btn.setPalette(self.pushme_palette)\n else:\n self.clear_Btn.setPalette(self.orig_palette)\n\n def set_current_effect(self):\n txt_boxes = dict(effect=self.effect_txt_box, lower=self.low_txt_box, upper=self.high_txt_box)\n calc_fncs.helper_set_current_effect(ma_unit=self.ma_unit,\n txt_boxes=txt_boxes,\n current_effect=self.cur_effect,\n group_str=self.group_str,\n data_type=\"continuous\",\n mult=self.mult)\n \n self.change_row_color_according_to_metric()\n \n def change_row_color_according_to_metric(self):\n # Change color of bottom rows of table according one or two-arm metric\n curr_effect_is_one_arm = self.cur_effect in CONTINUOUS_ONE_ARM_METRICS\n row = 1\n for col in range(len(self.get_column_header_strs(self.simple_table))):\n item = self.simple_table.item(row, col)\n if curr_effect_is_one_arm:\n item.setBackground(QBrush(QColor(Qt.gray)))\n else:\n # just reset the item\n text = item.text()\n self.simple_table.blockSignals(True)\n popped_item = self.simple_table.takeItem(row, col)\n self.simple_table.blockSignals(False)\n del popped_item\n self._set_val(row, col, text, self.simple_table)\n \n def update_raw_data(self):\n '''Updates table widget with data from ma_unit'''\n\n self.simple_table.blockSignals(True)\n for row_index, group_name in enumerate(self.cur_groups):\n grp_raw_data = self.ma_unit.get_raw_data_for_group(group_name)\n for col in range(len(grp_raw_data)):\n self._set_val(row_index, col, grp_raw_data[col], self.simple_table)\n # also insert the SEs, if we have them\n se_col = 3\n se = self.ma_unit.get_se(self.cur_effect, self.group_str, self.mult)\n self._set_val(row_index, col, grp_raw_data[col], self.simple_table)\n self.simple_table.blockSignals(False) \n self.impute_data()\n\n \n def _cell_data_not_valid(self, celldata_string, cell_header=None):\n # ignore blank entries\n if celldata_string.trimmed() == \"\" or celldata_string is None:\n return None\n\n if not is_a_float(celldata_string):\n return \"Raw data needs to be numeric.\"\n\n if cell_header in ['n','sd','se','var','pval'] and float(celldata_string) < 0:\n return \"%s cannot be negative.\" % (cell_header,)\n \n if cell_header == 'pval' and not (0 <= float(celldata_string) <= 1):\n return \"pval must be between 0 and 1\"\n return None\n \n def _get_correlation_str(self):\n return str(self.correlation_pre_post.text())\n \n def _cell_changed(self, row, col):\n \n old_ma_unit, old_tables_data = self._save_ma_unit_and_table_states(\n tables=self.tables,\n ma_unit=self.ma_unit,\n table=self.simple_table,\n row=row, col=col,\n old_value=self.current_item_data[self.simple_table],\n use_old_value=True)\n old_correlation = self._get_correlation_str()\n \n # Just for simple_table for now \n column_headers = self.get_column_header_strs()\n try:\n warning_msg = self._cell_data_not_valid(self.simple_table.item(row, col).text(),column_headers[col])\n if warning_msg:\n raise Exception(\"Invalid Cell Data\")\n self.impute_data()\n 
except Exception as e:\n msg = e.args[0]\n QMessageBox.warning(self.parent(), \"whoops\", msg)\n self.restore_ma_unit_and_tables(old_ma_unit, old_tables_data, old_correlation)\n return\n \n self._copy_raw_data_from_table_to_ma_unit() # table --> ma_unit\n self.try_to_update_cur_outcome()\n \n new_ma_unit, new_tables_data = self._save_ma_unit_and_table_states(\n tables=self.tables,\n ma_unit=self.ma_unit,\n table=self.simple_table,\n row=row, col=col,\n use_old_value=False)\n new_correlation = self._get_correlation_str()\n restore_old_f = lambda: self.restore_ma_unit_and_tables(old_ma_unit, old_tables_data, old_correlation)\n restore_new_f = lambda: self.restore_ma_unit_and_tables(new_ma_unit, new_tables_data, new_correlation)\n command = calc_fncs.CommandFieldChanged(restore_new_f=restore_new_f,\n restore_old_f=restore_old_f,\n parent=self)\n self.undoStack.push(command)\n \n ###self.enable_txt_box_input() # if the effect was imputed\n ###self.set_clear_btn_color()\n \n def _set_val(self, row_index, var_index, val, table=None):\n if table == None:\n table = self.simple_table\n \n row,col = row_index, var_index \n if is_NaN(val): # get out quick\n print \"%s is not a number\" % val\n return\n \n try:\n table.blockSignals(True)\n str_val = \"\" if val in EMPTY_VALS else str(float(val))\n if table.item(row, col) is None:\n table.setItem(row, col, QTableWidgetItem(str_val))\n else:\n table.item(row, col).setText(str_val)\n table.blockSignals(False)\n \n ###self._disable_row_if_filled(table, row, col)\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n print(\"Got to except in _set_val when trying to set (%d,%d) to %s\" % (row,col, str(val))) \n #raise \n\n def _disable_row_if_filled(self, table, row, col):\n #if str_val != \"\": #disable item\n table.blockSignals(True)\n N_col = table.columnCount()\n \n print(\"Row is filled? %s\" % str(self._table_row_filled(table, row)))\n \n if self._table_row_filled(table, row):\n print(\"Disabling row... 
%d\" % row)\n for col in range(N_col):\n self._disable_cell(table, row, col)\n table.blockSignals(False)\n \n def _disable_cell(self, table, row, col):\n table.blockSignals(True)\n item = table.item(row, col)\n newflags = item.flags() & ~Qt.ItemIsEditable\n item.setFlags(newflags)\n table.blockSignals(False)\n \n def _table_row_filled(self, table, row):\n N_col = table.columnCount()\n row_filled = True\n for col in range(N_col):\n item = table.item(row, col)\n if item is None or item.text() == \"\":\n row_filled = False\n return row_filled\n\n def _copy_raw_data_from_table_to_ma_unit(self):\n for row_index, group_name in enumerate(self.cur_groups):\n grp_raw_data = self.ma_unit.get_raw_data_for_group(group_name)\n for col_index in range(len(grp_raw_data)):\n cur_val = self._get_float(row_index, col_index)\n self.ma_unit.get_raw_data_for_group(group_name)[col_index] = cur_val\n\n ## also check if SEs have been entered directly\n ##se_index = 3\n ##se = self._get_float(row_index, se_index)\n ##self.ma_unit.set_SE(self.cur_effect, self.group_str, se):\n \n def restore_ma_unit(self, old_ma_unit):\n ''' Restores the ma_unit data and resets the form'''\n self.ma_unit.__dict__ = copy.deepcopy(old_ma_unit.__dict__)\n print(\"Restored ma_unit data: %s\" % str(self.ma_unit.get_raw_data_for_groups(self.cur_groups)))\n \n self.initialize_form() # clear form first\n self.update_raw_data()\n self.set_current_effect()\n self.impute_data()\n self.enable_back_calculation_btn()\n #self.set_clear_btn_color()\n \n def restore_tables(self, old_tables_data):\n '''Assumes old tables data given in follow order:\n simple_table, g1_pre_post_table, g2_pre_post_table\n '''\n \n for i,old_table_data in enumerate(old_tables_data):\n nrows = len(old_table_data)\n ncols = len(old_table_data[0])\n table = self.tables[i]\n \n for row in range(nrows):\n for col in range(ncols):\n table.blockSignals(True)\n self._set_val(row, col, old_table_data[row][col], table=table)\n table.blockSignals(False)\n \n def restore_ma_unit_and_tables(self, old_ma_unit, old_tables_data,\n old_correlation):\n self.restore_ma_unit(old_ma_unit)\n self.restore_tables(old_tables_data)\n self.correlation_pre_post.setText(old_correlation)\n\n def save_tables_data(self):\n old_tables_data = []\n for table in self.tables:\n old_tables_data.append(calc_fncs.save_table_data(table))\n return old_tables_data\n \n def _save_ma_unit_and_table_states(self, tables, ma_unit, table=None, row=None,\n col=None, old_value=None,\n use_old_value=True):\n # Make backup of tables info...\n old_tables_data = self.save_tables_data()\n if use_old_value:\n # From before most recently changed cell changed\n old_tables_data[self._get_index_of_table(table)][row][col] = old_value\n \n # Make backup copy of ma_unit\n old_ma_unit = copy.deepcopy(ma_unit)\n return old_ma_unit, old_tables_data\n \n \n def _get_index_of_table(self, table):\n index = -1\n for i,x in enumerate(self.tables):\n if table is x:\n index = i\n return index\n \n \n def impute_data(self):\n ''' compute what we can for each study from what has been given in the table'''\n \n # note that we rely on the variable names corresponding to what\n # the meta_py_r routine expects.\n var_names = self.get_column_header_strs()\n for row_index, group_name in enumerate(self.cur_groups):\n # assemble the fields in a dictionary; pass off to meta_py_r\n cur_dict = {}\n for var_index, var_name in enumerate(var_names):\n var_value = self._get_float(row_index, var_index)\n if var_value is not None:\n cur_dict[var_name] = 
var_value\n\n # now pass off what we have for this study to the\n # imputation routine\n alpha = self.conf_level_to_alpha()\n results_from_r = meta_py_r.impute_cont_data(cur_dict, alpha)\n\n print \"Raw results from R (imputation): %s\" % results_from_r\n print results_from_r\n\n print \"Results from r succeeded?:\", results_from_r[\"succeeded\"]\n if results_from_r[\"succeeded\"]:\n computed_vals = results_from_r[\"output\"]\n # and then iterate over the columns again, \n # populating the table with any available\n # computed fields\n \n print \"Computed vals:\",computed_vals\n for var_index, var_name in enumerate(var_names): \n self._set_val(row_index, var_index, computed_vals[var_name])\n self._copy_raw_data_from_table_to_ma_unit()\n else:\n try:\n print(\"Why didn't it succeed?: '%s'\" % results_from_r[\"comment\"])\n except KeyError:\n pass\n def conf_level_to_alpha(self):\n alpha = 1-self.conf_level/100.0\n return alpha\n \n def impute_pre_post_data(self, table, group_index, row=None, col=None):\n ''' \n The row index corresponds to the group that will be\n affected by the data edits. E.g., a row index of 0 will result\n in the data for the first group (row 0 in the simple_table)\n being modified.\n '''\n \n if not (row,col) == (None, None): # means this was called through user interaction, not programmatically\n old_ma_unit, old_tables_data = self._save_ma_unit_and_table_states(\n tables=self.tables,\n ma_unit=self.ma_unit,\n table=table,\n row=row, col=col,\n old_value=self.current_item_data[table],\n use_old_value=True)\n old_correlation = self._get_correlation_str()\n \n \n group_name = self.cur_groups[group_index]\n var_names = self.get_column_header_strs_pre_post()\n params_dict = {}\n # A, B correspond to pre, post\n for a_b_index, a_b_name in enumerate([\"A\", \"B\"]):\n # assemble the fields in a dictionary; pass off to meta_py_r\n for var_index, var_name in enumerate(var_names):\n var_value = self._get_float(a_b_index, var_index, table)\n if var_value is not None:\n params_dict[\"%s.%s\" % (var_name, a_b_name)] = var_value\n params_dict['metric']= (\"'%s'\" % self.cur_effect)\n\n # now pass off what we have for this study to the\n # imputation routine\n results_from_r = meta_py_r.impute_pre_post_cont_data(params_dict,\n float(self.correlation_pre_post.text()),\n self.conf_level_to_alpha())\n \n print \"imputation results from R: %s\" % results_from_r\n \n if not results_from_r[\"succeeded\"]:\n return None\n \n print(\"Prepost-imputation succeeded\")\n \n ### \n # first update the simple table\n computed_vals = results_from_r[\"output\"]\n \n for var_index, var_name in enumerate(self.get_column_header_strs()):\n val = computed_vals[var_name]\n self._set_val(group_index, var_index, val)\n\n # update the raw data for N, mean and SD fields (this is all that is actually stored)\n if var_index < 3:\n self.ma_unit.get_raw_data_for_group(group_name)[var_index] = computed_vals[var_name] #\n \n self.try_to_update_cur_outcome() \n \n ###\n # also update the pre/post tables\n pre_vals = results_from_r[\"pre\"]\n post_vals = results_from_r[\"post\"]\n for var_index, var_name in enumerate(var_names):\n pre_val = pre_vals[var_name]\n post_val = post_vals[var_name]\n self._set_val(0, var_index, pre_val, table)\n self._set_val(1, var_index, post_val, table)\n \n self._copy_raw_data_from_table_to_ma_unit()\n self.set_clear_btn_color()\n \n # function was invoked as a result of user interaction, not\n # programmatically\n if not (row,col) == (None, None):\n new_ma_unit, new_tables_data = 
self._save_ma_unit_and_table_states(\n tables=self.tables,\n ma_unit=self.ma_unit,\n table=table,\n row=row, col=col,\n use_old_value=False)\n new_correlation = self._get_correlation_str()\n restore_old_f = lambda: self.restore_ma_unit_and_tables(old_ma_unit, old_tables_data, old_correlation)\n restore_new_f = lambda: self.restore_ma_unit_and_tables(new_ma_unit, new_tables_data, new_correlation)\n command = calc_fncs.CommandFieldChanged(restore_new_f=restore_new_f,\n restore_old_f=restore_old_f,\n parent=self)\n self.undoStack.push(command)\n \n def float_to_str(self, float_val):\n float_str = \"\"\n if not is_NaN(float_val):\n # TODO note the hard-coded number of digits here\n float_str = str(round(float_val, 4))\n return float_str \n \n def get_column_header_strs(self, table=None):\n if table is None:\n table = self.simple_table\n\n return [str(h_item.text()) for h_item in \\\n [table.horizontalHeaderItem(col) for col in \\\n range(table.columnCount())]]\n \n def get_column_header_strs_pre_post(self):\n return self.get_column_header_strs(table=self.g1_pre_post_table)\n \n @pyqtSignature(\"int, int, int, int\")\n def on_simple_table_currentCellChanged(self,currentRow,currentColumn,previousRow,previousColumn):\n self.current_item_data[self.simple_table] = self._get_float(currentRow,currentColumn)\n ###print \"Current Item Data:\",self.current_item_data\n \n @pyqtSignature(\"int, int, int, int\")\n def on_g1_pre_post_table_currentCellChanged(self,currentRow,currentColumn,previousRow,previousColumn):\n self.current_item_data[self.g1_pre_post_table] = self._get_float(currentRow,currentColumn)\n ###print \"Current Item Data:\",self.current_item_data\n \n @pyqtSignature(\"int, int, int, int\")\n def on_g2_pre_post_table_currentCellChanged(self,currentRow,currentColumn,previousRow,previousColumn):\n self.current_item_data[self.g2_pre_post_table] = self._get_float(currentRow,currentColumn)\n ###print \"Current Item Data:\",self.current_item_data\n \n\n def _is_empty(self, i, j, table):\n val = table.item(i,j)\n return val is None or val.text() == \"\"\n \n def _get_float(self, i, j, table=None):\n if table is None:\n table = self.simple_table\n \n if not self._is_empty(i, j, table) and not table.item(i,j).text() == \"NA\":\n try:\n return float(table.item(i,j).text())\n except:\n print(\"Could not convert %s to float\" % table.item(i,j))\n return None\n return None\n \n def no_val(self, x):\n return x is None or x == \"\"\n \n def try_to_update_cur_outcome(self):\n n1, m1, sd1, n2, m2, sd2 = self.ma_unit.get_raw_data_for_groups(self.cur_groups)\n se1, se2 = self._get_float(0, 3), self._get_float(1, 3)\n \n # here we check whether or not we have sufficient data to compute an outcome\n if not any([self.no_val(x) for x in [n1, m1, sd1, n2, m2, sd2 ]]) or \\\n not any([self.no_val(x) for x in [m1, se1, m2, se2]]) and self.cur_effect==\"MD\" or \\\n not any([self.no_val(x) for x in [n1, m1, sd1]]) and self.cur_effect in CONTINUOUS_ONE_ARM_METRICS:\n est_and_ci_d = None\n if self.cur_effect in CONTINUOUS_TWO_ARM_METRICS:\n est_and_ci_d = meta_py_r.continuous_effect_for_study(n1, m1, sd1, se1=se1, \n n2=n2, m2=m2, sd2=sd2, se2=se2,\n metric=self.cur_effect,\n conf_level=self.conf_level)\n else:\n # continuous, one-arm metric\n est_and_ci_d = meta_py_r.continuous_effect_for_study(n1, m1, sd1,\n two_arm=False, metric=self.cur_effect, conf_level=self.conf_level) \n \n est, low, high = est_and_ci_d[\"calc_scale\"] # calculation (e.g., log) scale\n self.ma_unit.set_effect_and_ci(self.cur_effect, 
self.group_str, est, low, high, mult=self.mult) \n self.set_current_effect()\n \n def enable_txt_box_input(self):\n ''' Enables text boxes if they are empty, disables them otherwise '''\n pass\n #meta_globals.enable_txt_box_input(self.effect_txt_box, self.low_txt_box,\n # self.high_txt_box, self.correlation_pre_post)\n \n def enable_back_calculation_btn(self, engage = False):\n # For undo/redo\n old_ma_unit, old_tables_data = self._save_ma_unit_and_table_states(\n tables = [self.simple_table,\n self.g1_pre_post_table,\n self.g2_pre_post_table],\n ma_unit = self.ma_unit, \n use_old_value=False)\n old_correlation = self._get_correlation_str()\n \n # Choose metric parameter if not already chosen\n if (self.metric_parameter is None) and self.cur_effect in [\"MD\",\"SMD\"]:\n print(\"need to choose metric parameter because it is %s\" % str(self.metric_parameter))\n if self.cur_effect == \"MD\":\n info = \"In order to perform back-calculation most accurately, we need to know something about the assumptions about the two population standard deviations.\\n*Are we assuming that both of the population standard deviations are the same (as in most parametric data analysis techniques)\"\n option0_txt = \"yes (default).\"\n option1_txt = \"no\"\n dialog = ChooseBackCalcResultForm(info, option0_txt, option1_txt)\n dialog.setWindowTitle(\"Population SD Assumptions\")\n if dialog.exec_():\n self.metric_parameter = True if dialog.getChoice() == 0 else False\n elif self.cur_effect == \"SMD\":\n info = \"In order to perform back-calculation most accurately, we need to know if the the bias in the SMD been corrected i.e. should we use Hedge's g or Cohen's d when performing the back calculation?\"\n option0_txt = \"Hedges' g (default)\" \n option1_txt = \"Cohen's d\"\n dialog = ChooseBackCalcResultForm(info, option0_txt, option1_txt)\n dialog.setWindowTitle(\"SMD bias correction\")\n if dialog.exec_():\n self.metric_parameter = True if dialog.getChoice() == 0 else False\n print(\"metric_parameter is now %s\" % str(self.metric_parameter))\n \n def build_data_dicts():\n var_names = self.get_column_header_strs()\n tmp = []\n for row_index in range(2):\n value = lambda x: self._get_float(row_index, x)\n tmp.append([(var_name, value(i)) for i, var_name in enumerate(var_names) if value(i) is not None])\n group1_data = dict(tmp[0])\n group2_data = dict(tmp[1])\n \n tmp = self.ma_unit.get_effect_and_ci(self.cur_effect, self.group_str, self.mult)\n effect_data = {\"est\":tmp[0], \"low\":tmp[1], \"high\":tmp[2],\n \"metric\":self.cur_effect,\n \"met.param\":self.metric_parameter}\n \n #print(\"Group 1 Data: \", group1_data)\n #print(\"Group 2 Data: \", group2_data)\n #print(\"Effect Data: \", effect_data)\n \n return (group1_data, group2_data, effect_data)\n def new_data(g1_data, g2_data, imputed):\n changed = False\n \n new_data = (imputed[\"n1\"],\n imputed[\"sd1\"],\n imputed[\"mean1\"],\n imputed[\"n2\"],\n imputed[\"sd2\"],\n imputed[\"mean2\"])\n old_data = (g1_data[\"n\"] if \"n\" in g1_data else None,\n g1_data[\"sd\"] if \"sd\" in g1_data else None,\n g1_data[\"mean\"] if \"mean\" in g1_data else None,\n g2_data[\"n\"] if \"n\" in g2_data else None,\n g2_data[\"sd\"] if \"sd\" in g2_data else None,\n g2_data[\"mean\"] if \"mean\" in g2_data else None,\n )\n new_item_available = lambda old, new: (old is None) and (new is not None)\n comparison = [new_item_available(old_data[i], new_data[i]) for i in range(len(new_data))]\n print(\"Comparison:\", comparison)\n if any(comparison):\n changed = True\n else:\n changed 
= False\n return changed\n \n if self.cur_effect not in [\"MD\", \"SMD\"]:\n self.back_calc_btn.setVisible(False)\n return None\n else:\n self.back_calc_btn.setVisible(True)\n \n (group1_data, group2_data, effect_data) = build_data_dicts()\n imputed = meta_py_r.back_calc_cont_data(group1_data, group2_data, effect_data, self.conf_level)\n print(\"Imputed data: \", imputed)\n \n # Leave if there was a failure\n if \"FAIL\" in imputed:\n print(\"Failure to impute\")\n self.back_calc_btn.setEnabled(False)\n return None\n \n if new_data(group1_data, group2_data, imputed):\n self.back_calc_btn.setEnabled(True)\n else:\n self.back_calc_btn.setEnabled(False)\n self.set_clear_btn_color()\n \n if not engage:\n return None\n \n ########################################################################\n # Actually do stuff with imputed data here if we are 'engaged'\n ########################################################################\n # Choose one of the values if multiple ones were returned in the output\n keys_to_names = {\"n1\":\"group 1 sample size\",\n \"n2\":\"group 2 sample size\",\n \"sd1\":\"group 1 standard deviation\",\n \"sd2\":\"group 2 standard deviation\",\n \"mean1\":\"group 1 mean\",\n \"mean2\":\"group 2 mean\"}\n for key,value in imputed.iteritems():\n # TODO: (maybe).....: The R code which generates results can\n # POTENTIALLY yield a maximum of 4 numbers for n1 and n2. However,\n # empirical testing has shown that this doesn't really happen.\n # However, for completeness in the future the number of\n # ChooseBackCalcResultForm options should be generated dynamically\n \n if is_list(value):\n info = (\"The back calculation has resulted in multiple results for \"\n + keys_to_names[key] + \"\\n\\nPlease choose one of the following:\")\n option0_txt = keys_to_names[key] + \" = \" + str(value[0])\n option1_txt = keys_to_names[key] + \" = \" + str(value[1])\n print(\"Options (0,1)\", value[0], value[1])\n \n dialog = ChooseBackCalcResultForm(info, option0_txt, option1_txt)\n if dialog.exec_():\n imputed[key] = value[0] if dialog.getChoice() == 0 else value[1]\n else: # pressed cancel\n return None # do nothing and leave\n \n # Write the data to the table\n var_names = self.get_column_header_strs()\n group1_data = {\"n\":imputed[\"n1\"],\n \"sd\":imputed[\"sd1\"],\n \"mean\":imputed[\"mean1\"]}\n group2_data = {\"n\":imputed[\"n2\"],\n \"sd\":imputed[\"sd2\"],\n \"mean\":imputed[\"mean2\"]}\n for row in range(len(self.cur_groups)):\n for var_index, var_name in enumerate(var_names):\n if var_name not in [\"n\",\"sd\",\"mean\"]:\n continue\n val = group1_data[var_name] if row == 0 else group2_data[var_name]\n if var_name == 'n' and val not in EMPTY_VALS: \n val = int(round(val)) # convert float to integer\n self._set_val(row, var_index, val, self.simple_table)\n \n self.impute_data()\n self._copy_raw_data_from_table_to_ma_unit()\n #self.set_clear_btn_color()\n \n # For undo/redo\n self.enable_back_calculation_btn()\n new_ma_unit, new_tables_data = self._save_ma_unit_and_table_states(\n tables = [self.simple_table,\n self.g1_pre_post_table,\n self.g2_pre_post_table],\n ma_unit = self.ma_unit, \n use_old_value=False)\n new_correlation = self._get_correlation_str()\n restore_old_f = lambda: self.restore_ma_unit_and_tables(old_ma_unit, old_tables_data, old_correlation)\n restore_new_f = lambda: self.restore_ma_unit_and_tables(new_ma_unit, new_tables_data, new_correlation)\n command = calc_fncs.CommandFieldChanged(restore_new_f=restore_new_f,\n restore_old_f=restore_old_f,\n parent=self)\n 
self.undoStack.push(command)\n \n def clear_form(self):\n # For undo/redo\n old_ma_unit, old_tables_data = self._save_ma_unit_and_table_states(\n tables = [self.simple_table,\n self.g1_pre_post_table,\n self.g2_pre_post_table],\n ma_unit = self.ma_unit, \n use_old_value=False)\n old_correlation = self._get_correlation_str()\n \n self.metric_parameter = None # } these two should go together\n self.enable_txt_box_input() # }\n \n calc_fncs.block_signals(self.entry_widgets, True)\n # reset tables\n for table in self.tables:\n for row_index in range(len(self.cur_groups)):\n for var_index in range(table.columnCount()):\n self._set_val(row_index, var_index, \"\", table=table)\n calc_fncs.block_signals(self.entry_widgets, False)\n \n self._copy_raw_data_from_table_to_ma_unit()\n\n # clear out effects stuff\n for metric in CONTINUOUS_ONE_ARM_METRICS + CONTINUOUS_TWO_ARM_METRICS:\n if ((self.cur_effect in CONTINUOUS_TWO_ARM_METRICS and metric in CONTINUOUS_TWO_ARM_METRICS) or\n (self.cur_effect in CONTINUOUS_ONE_ARM_METRICS and metric in CONTINUOUS_ONE_ARM_METRICS)):\n self.ma_unit.set_effect_and_ci(metric, self.group_str, None, None, None, mult=self.mult)\n else:\n # TODO: Do nothing for now..... treat the case where we have to switch group strings down the line\n pass\n \n # clear line edits\n self.set_current_effect()\n calc_fncs.block_signals(self.entry_widgets, True)\n self.correlation_pre_post.setText(\"0.0\")\n calc_fncs.block_signals(self.entry_widgets, False)\n \n # For undo/redo\n self.enable_back_calculation_btn()\n new_ma_unit, new_tables_data = self._save_ma_unit_and_table_states(\n tables = [self.simple_table,\n self.g1_pre_post_table,\n self.g2_pre_post_table],\n ma_unit = self.ma_unit, \n use_old_value=False)\n new_correlation = self._get_correlation_str()\n restore_old_f = lambda: self.restore_ma_unit_and_tables(old_ma_unit, old_tables_data, old_correlation)\n restore_new_f = lambda: self.restore_ma_unit_and_tables(new_ma_unit, new_tables_data, new_correlation)\n command = calc_fncs.CommandFieldChanged(restore_new_f=restore_new_f,\n restore_old_f=restore_old_f,\n parent=self)\n self.undoStack.push(command)\n \n \n def get_effect_names(self):\n effects = self.ma_unit.get_effect_names()\n return effects\n\n def get_cur_group_str(self):\n # Inspired from get_cur_group_str of ma_data_table_model\n\n if self.cur_effect in CONTINUOUS_ONE_ARM_METRICS:\n group_str = self.cur_groups[0]\n else:\n group_str = \"-\".join(self.cur_groups)\n return group_str\n \n ####### Undo framework ############\n def undo(self):\n print(\"undoing....\")\n self.undoStack.undo()\n \n def redo(self):\n print(\"redoing....\")\n self.undoStack.redo()\n\n\n################################################################################\nclass ChooseBackCalcResultForm(QDialog, forms.ui_choose_back_calc_result_form.Ui_ChooseBackCalcResultForm):\n def __init__(self, info_text, op1_txt, op2_txt, parent=None, op3_txt=None, op4_txt=None):\n super(ChooseBackCalcResultForm, self).__init__(parent)\n self.setupUi(self)\n \n ####self.choice1_lbl.setText(op1_txt)\n ####self.choice2_lbl.setText(op2_txt)\n \n self.choice1_btn.setText(op1_txt)\n self.choice2_btn.setText(op2_txt)\n \n \n self.info_label.setText(info_text)\n\n def getChoice(self):\n # Choice data to be returned is index of data item\n if self.choice1_btn.isChecked():\n return 0\n else:\n return 1\n################################################################################\n " }, { "alpha_fraction": 0.6928308606147766, "alphanum_fraction": 0.706250011920929, 
"avg_line_length": 52.33333206176758, "blob_id": "b07a320ec50e79c97674e0c5fa608995a9e1419b", "content_id": "122a914eea2a29e8aaa51bc95e72f9c552813894", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5440, "license_type": "no_license", "max_line_length": 106, "num_lines": 102, "path": "/src/forms/ui_meta_reg.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'cov_reg_dlg2.ui'\n#\n# Created: Wed Apr 17 14:37:19 2013\n# by: PyQt4 UI code generator 4.10\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_cov_reg_dialog(object):\n def setupUi(self, cov_reg_dialog):\n cov_reg_dialog.setObjectName(_fromUtf8(\"cov_reg_dialog\"))\n cov_reg_dialog.resize(401, 323)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n cov_reg_dialog.setFont(font)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(_fromUtf8(\":/images/meta.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n cov_reg_dialog.setWindowIcon(icon)\n self.verticalLayout = QtGui.QVBoxLayout(cov_reg_dialog)\n self.verticalLayout.setObjectName(_fromUtf8(\"verticalLayout\"))\n self.label = QtGui.QLabel(cov_reg_dialog)\n self.label.setObjectName(_fromUtf8(\"label\"))\n self.verticalLayout.addWidget(self.label)\n spacerItem = QtGui.QSpacerItem(20, 10, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)\n self.verticalLayout.addItem(spacerItem)\n self.cov_grp_box = QtGui.QGroupBox(cov_reg_dialog)\n self.cov_grp_box.setObjectName(_fromUtf8(\"cov_grp_box\"))\n self.verticalLayout.addWidget(self.cov_grp_box)\n spacerItem1 = QtGui.QSpacerItem(20, 30, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)\n self.verticalLayout.addItem(spacerItem1)\n self.diagnostic_group_box = QtGui.QGroupBox(cov_reg_dialog)\n self.diagnostic_group_box.setMinimumSize(QtCore.QSize(0, 50))\n self.diagnostic_group_box.setObjectName(_fromUtf8(\"diagnostic_group_box\"))\n self.verticalLayout_3 = QtGui.QVBoxLayout(self.diagnostic_group_box)\n self.verticalLayout_3.setObjectName(_fromUtf8(\"verticalLayout_3\"))\n self.dor_radio = QtGui.QRadioButton(self.diagnostic_group_box)\n self.dor_radio.setChecked(True)\n self.dor_radio.setObjectName(_fromUtf8(\"dor_radio\"))\n self.verticalLayout_3.addWidget(self.dor_radio)\n self.sensitivity_radio = QtGui.QRadioButton(self.diagnostic_group_box)\n self.sensitivity_radio.setChecked(False)\n self.sensitivity_radio.setObjectName(_fromUtf8(\"sensitivity_radio\"))\n self.verticalLayout_3.addWidget(self.sensitivity_radio)\n self.fixed_radio = QtGui.QRadioButton(self.diagnostic_group_box)\n self.fixed_radio.setObjectName(_fromUtf8(\"fixed_radio\"))\n self.verticalLayout_3.addWidget(self.fixed_radio)\n self.verticalLayout.addWidget(self.diagnostic_group_box)\n self.groupBox = QtGui.QGroupBox(cov_reg_dialog)\n self.groupBox.setMinimumSize(QtCore.QSize(0, 50))\n self.groupBox.setObjectName(_fromUtf8(\"groupBox\"))\n self.verticalLayout_2 = QtGui.QVBoxLayout(self.groupBox)\n 
self.verticalLayout_2.setObjectName(_fromUtf8(\"verticalLayout_2\"))\n self.random_effects_radio = QtGui.QRadioButton(self.groupBox)\n self.random_effects_radio.setChecked(True)\n self.random_effects_radio.setObjectName(_fromUtf8(\"random_effects_radio\"))\n self.verticalLayout_2.addWidget(self.random_effects_radio)\n self.fixed_effects_radio = QtGui.QRadioButton(self.groupBox)\n self.fixed_effects_radio.setObjectName(_fromUtf8(\"fixed_effects_radio\"))\n self.verticalLayout_2.addWidget(self.fixed_effects_radio)\n self.verticalLayout.addWidget(self.groupBox)\n self.buttonBox = QtGui.QDialogButtonBox(cov_reg_dialog)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.buttonBox.setFont(font)\n self.buttonBox.setOrientation(QtCore.Qt.Horizontal)\n self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)\n self.buttonBox.setObjectName(_fromUtf8(\"buttonBox\"))\n self.verticalLayout.addWidget(self.buttonBox)\n\n self.retranslateUi(cov_reg_dialog)\n QtCore.QMetaObject.connectSlotsByName(cov_reg_dialog)\n\n def retranslateUi(self, cov_reg_dialog):\n cov_reg_dialog.setWindowTitle(_translate(\"cov_reg_dialog\", \"select covariates\", None))\n self.label.setText(_translate(\"cov_reg_dialog\", \"select covariates for regression:\", None))\n self.cov_grp_box.setTitle(_translate(\"cov_reg_dialog\", \"available covariates\", None))\n self.diagnostic_group_box.setTitle(_translate(\"cov_reg_dialog\", \"metric\", None))\n self.dor_radio.setText(_translate(\"cov_reg_dialog\", \"diagnostic odds ratio\", None))\n self.sensitivity_radio.setText(_translate(\"cov_reg_dialog\", \"sensitivity\", None))\n self.fixed_radio.setText(_translate(\"cov_reg_dialog\", \"specificity\", None))\n self.groupBox.setTitle(_translate(\"cov_reg_dialog\", \"model type\", None))\n self.random_effects_radio.setText(_translate(\"cov_reg_dialog\", \"random effects\", None))\n self.fixed_effects_radio.setText(_translate(\"cov_reg_dialog\", \"fixed effect\", None))\n\nimport icons_rc\n" }, { "alpha_fraction": 0.6091549396514893, "alphanum_fraction": 0.61091548204422, "avg_line_length": 32.92537307739258, "blob_id": "2274ae3df6d3a852a68367f136cef6d9c8e225fd", "content_id": "8255b5bbeeff79b368e330b0be8e263fb40a2a18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2272, "license_type": "no_license", "max_line_length": 86, "num_lines": 67, "path": "/src/add_new_dialogs.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "#from PyQt4.Qt import *\nfrom PyQt4.Qt import QDialog, QString, QVariant\n#from meta_globals import *\nfrom meta_globals import DIAGNOSTIC\nimport forms.ui_new_group\nimport forms.ui_new_follow_up\nimport forms.ui_new_outcome\nimport forms.ui_new_covariate\nimport forms.ui_new_study\n\n#import pdb\n\nclass AddNewGroupForm(QDialog, forms.ui_new_group.Ui_new_group_dialog):\n \n def __init__(self, parent=None):\n super(AddNewGroupForm, self).__init__(parent)\n self.setupUi(self)\n \n \nclass AddNewFollowUpForm(QDialog, forms.ui_new_follow_up.Ui_new_follow_up_dialog):\n \n def __init__(self, parent=None):\n super(AddNewFollowUpForm, self).__init__(parent)\n self.setupUi(self)\n \n \nclass AddNewOutcomeForm(QDialog, forms.ui_new_outcome.Ui_Dialog):\n \n def __init__(self, parent=None, is_diag=False):\n super(AddNewOutcomeForm, self).__init__(parent)\n ###\n # we need to know if the outcome should be diagnostic\n # or not.\n self.is_diag = is_diag\n \n self.setupUi(self)\n 
self._populate_combo_box()\n \n def _populate_combo_box(self):\n # diagnostic datasets can have only diagnostic outcomes\n if self.is_diag:\n self.datatype_cbo_box.addItem(QString(\"Diagnostic\"), QVariant(DIAGNOSTIC))\n else:\n for name, type_id in zip([QString(s) for s in [\"Binary\", \"Continuous\"]],\n [QVariant(i) for i in range(2)]):\n self.datatype_cbo_box.addItem(name, type_id)\n \n\n\nclass AddNewStudyForm(QDialog, forms.ui_new_study.Ui_new_study_dialog):\n \n def __init__(self, parent=None):\n super(AddNewStudyForm, self).__init__(parent)\n self.setupUi(self)\n \nclass AddNewCovariateForm(QDialog, forms.ui_new_covariate.Ui_new_covariate_dialog):\n \n def __init__(self, parent=None):\n super(AddNewCovariateForm, self).__init__(parent)\n self.setupUi(self)\n self._populate_combo_box()\n\n \n def _populate_combo_box(self):\n for name, type_id in zip([QString(s) for s in [\"continuous\", \"factor\"]],\n [QVariant(i) for i in range(2)]):\n self.datatype_cbo_box.addItem(name, type_id)" }, { "alpha_fraction": 0.6552884578704834, "alphanum_fraction": 0.6572115421295166, "avg_line_length": 28.72857093811035, "blob_id": "26e20b0606b3405d9ac2062aff081b7416d6bd76", "content_id": "36c16ad09474c3d2067e9f28e1f94ba8aa79966f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2080, "license_type": "no_license", "max_line_length": 88, "num_lines": 70, "path": "/src/launch.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "import sys, time\nfrom PyQt4 import QtGui\nfrom PyQt4.Qt import *\n\nimport meta_py_r\nimport meta_form\nimport meta_globals\nimport settings\nfrom meta_form import DISABLE_NETWORK_STUFF\n\nSPLASH_DISPLAY_TIME = 0 # TODO: change to 5 seconds in production version\n\ndef load_R_libraries(app, splash=None):\n ''' Loads the R libraries while updating the splash screen'''\n \n meta_py_r.get_R_libpaths() # print the lib paths\n rloader = meta_py_r.RlibLoader()\n \n splash.showMessage(\"Loading R libraries\\n..\")\n app.processEvents()\n \n splash.showMessage(\"Loading metafor\\n....\")\n app.processEvents()\n rloader.load_metafor()\n \n splash.showMessage(\"Loading openmetar\\n........\")\n app.processEvents()\n rloader.load_openmetar()\n \n splash.showMessage(\"Loading igraph\\n............\")\n app.processEvents()\n rloader.load_igraph()\n \n splash.showMessage(\"Loading grid\\n................\")\n app.processEvents()\n rloader.load_grid()\n \n if not DISABLE_NETWORK_STUFF:\n splash.showMessage(\"Loading gemtc\\n...................\")\n app.processEvents()\n rloader.load_gemtc()\n\ndef start():\n app = QtGui.QApplication(sys.argv)\n app.setApplicationName(meta_globals.APPLICATION_NAME)\n app.setOrganizationName(meta_globals.ORGANIZATION_NAME)\n settings.setup_directories()\n \n splash_pixmap = QPixmap(\":/misc/splash.png\")\n splash = QSplashScreen(splash_pixmap)\n splash.show()\n splash_starttime = time.time()\n \n load_R_libraries(app, splash)\n \n # Show splash screen for at least SPLASH_DISPLAY_TIME seconds\n time_elapsed = time.time() - splash_starttime\n print(\"It took %s seconds to load the R libraries\" % str(time_elapsed))\n if time_elapsed < SPLASH_DISPLAY_TIME: # seconds\n print(\"Going to sleep for %f seconds\" % float(SPLASH_DISPLAY_TIME-time_elapsed))\n QThread.sleep(int(SPLASH_DISPLAY_TIME-time_elapsed))\n\n meta = meta_form.MetaForm()\n splash.finish(meta)\n meta.show()\n meta.start()\n sys.exit(app.exec_())\n\nif __name__ == \"__main__\":\n start()" }, { "alpha_fraction": 
0.6917577981948853, "alphanum_fraction": 0.711101770401001, "avg_line_length": 41.44643020629883, "blob_id": "df0b93cf0b34129eafbebe572b3ddbae7db35bb0", "content_id": "477fce219cb50975843aec7f6c3260f1021c6241", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2378, "license_type": "no_license", "max_line_length": 96, "num_lines": 56, "path": "/src/forms/ui_outcome_name_page.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'outcome_name_page.ui'\n#\n# Created: Thu Jun 27 10:21:34 2013\n# by: PyQt4 UI code generator 4.10.1\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_WizardPage(object):\n def setupUi(self, WizardPage):\n WizardPage.setObjectName(_fromUtf8(\"WizardPage\"))\n WizardPage.resize(285, 46)\n WizardPage.setMinimumSize(QtCore.QSize(285, 45))\n WizardPage.setMaximumSize(QtCore.QSize(300, 50))\n WizardPage.setSubTitle(_fromUtf8(\"\"))\n self.horizontalLayout = QtGui.QHBoxLayout(WizardPage)\n self.horizontalLayout.setObjectName(_fromUtf8(\"horizontalLayout\"))\n self.label = QtGui.QLabel(WizardPage)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())\n self.label.setSizePolicy(sizePolicy)\n self.label.setObjectName(_fromUtf8(\"label\"))\n self.horizontalLayout.addWidget(self.label)\n self.outcome_name_LineEdit = QtGui.QLineEdit(WizardPage)\n self.outcome_name_LineEdit.setEnabled(True)\n self.outcome_name_LineEdit.setText(_fromUtf8(\"\"))\n self.outcome_name_LineEdit.setObjectName(_fromUtf8(\"outcome_name_LineEdit\"))\n self.horizontalLayout.addWidget(self.outcome_name_LineEdit)\n self.label.setBuddy(self.outcome_name_LineEdit)\n\n self.retranslateUi(WizardPage)\n QtCore.QMetaObject.connectSlotsByName(WizardPage)\n\n def retranslateUi(self, WizardPage):\n WizardPage.setWindowTitle(_translate(\"WizardPage\", \"WizardPage\", None))\n WizardPage.setTitle(_translate(\"WizardPage\", \"What is the name of your outcome?\", None))\n self.label.setText(_translate(\"WizardPage\", \"Outcome Name:\", None))\n\n" }, { "alpha_fraction": 0.7060117125511169, "alphanum_fraction": 0.7324047088623047, "avg_line_length": 36.97142791748047, "blob_id": "18bca725389af2064e96e2f5dcc49cd541d62251", "content_id": "f499c927a18db3bbdcea73b237b7886c20752b00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1364, "license_type": "no_license", "max_line_length": 177, "num_lines": 35, "path": "/src/R/HSROC/man/MRI.Rd", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "\\name{MRI}\r\n\\alias{MRI}\r\n\\docType{data}\r\n\\title{ MAGNETIC RESONANCE IMAGING TEST (MRI) for evaluation of lymph node metastasis in women with invasive cervical cancer }\r\n\r\n\\description{\r\nThis dataset gives the observed cross-tabulation of the MRI (test under 
evaluation) and histologic/cytologic specimens obtained by surgery or lymph node biopsy (reference test).\r\n}\r\n\r\n\\usage{data(MRI)}\r\n\r\n\\format{\r\n A matrix with 10 observations on the following 4 variables.\r\n \\describe{\r\n \\item{\\code{++}}{Observed individuals who tested positive on both tests }\r\n \\item{\\code{+-}}{Observed individuals who tested positive on the test under evaluation and negative on the reference test}\r\n \\item{\\code{-+}}{Observed individuals who tested negative on the test under evaluation and positive on the reference test}\r\n \\item{\\code{---}}{Observed individuals who tested negative on both tests }\r\n }\r\n}\r\n\r\n\r\n\r\n\\references{ Scheidler J, Hricak H, Yu KK, Subak L, Segal MR. \\emph{Radiological evaluation of lymph node metastases in patients with cervical cander : a meta-analysis}.\r\nJournal of the American Medical Association 1997 ; 278(13):1096-1101. \r\n\r\nC. M. Rutter and C. A. Gatsonis. \\emph{A hierarchical regression approach to meta-analysis of diagnostic accuracy evaluations}. Statistics in Medicine 2001 ; 20(19):2865-2884.\r\n\r\n}\r\n\\examples{\r\ndata(MRI)\r\nMRI\r\n\r\n}\r\n\\keyword{datasets}\r\n" }, { "alpha_fraction": 0.5882830619812012, "alphanum_fraction": 0.5894431471824646, "avg_line_length": 34.47736740112305, "blob_id": "d219cf53211f1bf15fa6aaf6abc20828e764013d", "content_id": "eee84096f07e8cf2c3fc73abbe0552f4ba25bd6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8620, "license_type": "no_license", "max_line_length": 137, "num_lines": 243, "path": "/src/settings.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "#######################################################################\n# #\n# George Dietz #\n# CEBM @ Brown # \n# OpenMeta[analyst] # \n# # \n# Handle application settings and manage workspace (temp directory) # \n# # \n#######################################################################\n\nimport os\nfrom PyQt4 import QtCore, QtGui\nfrom PyQt4.Qt import *\nimport meta_py_r\n\n##################### HANDLE SETTINGS #####################\n\nMAX_RECENT_FILES = 10\nDEFAULT_SETTINGS = {\"splash\":True,\n \"digits\":3,\n \"recent_files\":[],\n \"explain_diag\":True,\n #\"method_params\":{},\n }\n\ndef update_setting(field, value):\n ''' updates the setting with key field to value (gets converted to a\n QVariant internally, need to reconvert to desired type in get_setting() '''\n \n \n settings = QSettings()\n\n # see if we need to store the value in a special way\n value_type = get_setting_type(field)\n if value_type == list:\n # Make sure that the written elements are strings (for now...., maybe extend it to scalars (i.e. 
number or string) in the future)\n # for now, this is just for reading the most recent files list\n if settings.contains(field):\n settings.remove(field)\n settings.beginGroup(field)\n for i,x in enumerate(value): # value is a list\n settings.setValue(str(i),x)\n settings.endGroup()\n elif value_type == dict:\n raise Exception(\"Not implemented yet!\")\n elif value_type == bool:\n settings.setValue(field, QVariant(value))\n elif value_type == QColor:\n # just being explicit to signify i am aware of QColors and to match get_setting\n settings.setValue(field, value)\n elif value_type == int:\n settings.setValue(field, value)\n elif value_type == str:\n settings.setValue(field, value)\n else:\n # nothing special needs to be done\n print(\"Field: %s\" % field)\n print(\"Value type: %s\" % str(value_type))\n raise Exception(\"Are you SURE that NOTHING special needs to be done?\")\n settings.setValue(field, value)\n\ndef get_setting_type(field):\n return type(DEFAULT_SETTINGS[field])\n\ndef get_setting(field):\n try:\n return _get_setting_helper(field)\n except Exception as e:\n print \"Exception while trying to access setting '%s', resetting settings to defaults\" % field\n reset_settings()\n return _get_setting_helper(field)\n return _get_setting_helper(field)\n\ndef _get_setting_helper(field):\n settings = QSettings()\n\n # see if we need to store the value in a special way\n value_type = get_setting_type(field)\n #print(\"Setting type: %s for %s\" % (str(value_type), field))\n if value_type == list:\n settings.beginGroup(field)\n indexes = list(settings.childKeys())\n foo_list = []\n for i in indexes:\n value = settings.value(i).toString().toUtf8() # byte array encoded in utf-8\n value = unicode(value, 'utf8')\n foo_list.append(value)\n settings.endGroup()\n setting_value = foo_list\n elif value_type == dict:\n raise Exception(\"Not implemented yet!\")\n elif value_type == bool:\n print(\"Converted %s to a boolean\" % field)\n setting_value = settings.value(field).toBool()\n elif value_type == str:\n setting_value = settings.value(field).toString()\n elif value_type == unicode:\n settings.setValue(field, value)\n elif value_type == int:\n setting_value = settings.value(field).toInt()[0]\n elif value_type == QColor:\n setting_value = QColor(settings.value(field))\n else:\n # nothing special needs to be done\n raise Exception(\"Are you SURE that NOTHING special needs to be done?\")\n setting_value = settings.value(field)\n\n return setting_value\n\n\ndef save_settings():\n print(\"saved settings\")\n settings = QSettings()\n settings.sync() # writes to permanent storage\n\n\ndef load_settings():\n ''' loads settings from QSettings object, setting suitable defaults if\n there are missing fields '''\n\n settings = QSettings()\n\n def field_is_toplevel_child_group_keys(field_name):\n childgroups = list(settings.childGroups())\n toplevel_group_keys = [str(x) for x in childgroups]\n return field_name in toplevel_group_keys\n\n for field, value in DEFAULT_SETTINGS.items():\n setting_present = settings.contains(field) or field_is_toplevel_child_group_keys(field)\n if not setting_present:\n print(\"Filling in setting for %s\" % field)\n update_setting(field, value)\n\n save_settings()\n print(\"loaded settings\")\n return settings\n\n\ndef reset_settings():\n print(\"Resetting settings to default\")\n settings = QSettings()\n settings.clear()\n\n for field, value in DEFAULT_SETTINGS.items():\n update_setting(field, value)\n save_settings()\n\ndef add_file_to_recent_files(fpath):\n # add a new file to 
the front of the deque\n # move existing file to the front of the deque\n\n if fpath in [None, \"\"]:\n return False\n\n recent_files = get_setting(\"recent_files\")\n\n if fpath in recent_files: #file already in list so move to front\n recent_files.remove(fpath)\n recent_files.append(fpath)\n\n # only want up to MAX_RECENT_FILES\n start_index = len(recent_files) - MAX_RECENT_FILES\n if start_index > 0:\n recent_files = recent_files[start_index:]\n\n update_setting(\"recent_files\", recent_files)\n save_settings()\n\n################ END HANDLE SETTINGS ######################\n\n\n###### HANDLE R_TEMP IN USER-AREA DIRECTORY ###################\ndef setup_directories():\n '''Makes temporary data directory, r_tmp within that\n Sets python and R working directories to temporary data directory\n clears r_tmp '''\n \n # make base path and r_tmp\n base_path = make_base_path()\n make_r_tmp()\n \n meta_py_r.reset_Rs_working_dir() # set working directory on R side\n os.chdir(os.path.normpath(base_path)) # set working directory on python side\n \n clear_r_tmp() # clear r_tmp\n \n \ndef make_base_path():\n ''' Creates the base path if it doesn't exist and returns the path\n On mac, this is something like: /Users/george/Library/Application Support/OpenMetaAnalyst '''\n\n base_path = get_base_path()\n\n success = QDir().mkpath(base_path)\n if not success:\n raise Exception(\"Could not create base path at %s\" % base_path)\n print(\"Made base path: %s\" % base_path)\n return base_path\n\ndef get_base_path(normalize=False):\n '''normalize changes the path separators according to the OS,\n Usually this shouldn't be done because R is confused by backward slashes \\\n because it sees it as an escape character and Qt is fine with / throughout '''\n\n base_path = str(QDesktopServices.storageLocation(QDesktopServices.DataLocation))\n if normalize:\n base_path = str(QDir.toNativeSeparators(base_path))\n print(\"Base path is: %s\" % base_path)\n return base_path\n\ndef make_r_tmp():\n ''' Makes the r_tmp folder and returns the path to it'''\n r_tmp_path = \"/\".join([get_base_path(),\"r_tmp\"])\n success = QDir().mkpath(r_tmp_path)\n if not success:\n raise Exception(\"Could not create r_tmp path at %s\" % r_tmp_path)\n print(\"Made r_tmp_path at %s\" % r_tmp_path)\n return r_tmp_path\n\ndef to_posix_path(path):\n ''' for now, just changes \\ to /\n Assumes there are no escapes in the path, very important!'''\n\n new_path = path.replace('\\\\', '/')\n return new_path\n\ndef clear_r_tmp():\n r_tmp_dir = os.path.join(get_base_path(), \"r_tmp\")\n print(\"Clearing %s\" % r_tmp_dir)\n for file_p in os.listdir(r_tmp_dir):\n file_path = os.path.join(r_tmp_dir, file_p)\n try:\n if os.path.isfile(file_path):\n print(\"deleting %s\" % file_path)\n os.unlink(file_path) # same as remove\n except Exception, e:\n print e\n \ndef get_user_documents_path():\n docs_path = str(QDesktopServices.storageLocation(QDesktopServices.DocumentsLocation))\n return docs_path\n \n############## END OF HANDLE R_TEMP IN USER-AREA DIRECTORY ####################" }, { "alpha_fraction": 0.7353098392486572, "alphanum_fraction": 0.7487980723381042, "avg_line_length": 66.0545425415039, "blob_id": "d9eba1f7b972c4351748668e292d0f4c78a56ef1", "content_id": "5d4400bd49ec3d2dedd80d7a9179dd3a02c99088", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 7488, "license_type": "no_license", "max_line_length": 884, "num_lines": 110, "path": "/src/R/HSROC/man/HSROCSummary.Rd", "repo_name": 
"bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "\\name{HSROCSummary}\r\n\\alias{HSROCSummary}\r\n\r\n\\title{Summary statistics for HSROC models.}\r\n\\description{\r\n This function produces summaries for the HSROC model parameters\r\n}\r\n\r\n\\usage{\r\n\r\nHSROCSummary(data, burn_in = 0, iter.keep = NULL, Thin = 1, sub_rs=NULL, \r\n point_estimate = c(\"median\", \"mean\"), path = getwd(), chain = NULL,\r\n tv = NULL, digit = 6, print_plot = FALSE ) \r\n\r\n}\r\n\r\n\r\n\\arguments{ \r\n \\item{data}{a matrix with the number of rows equal to the number of studies and 4 columns. Each row consists of the entries of the 2x2 table of the index test (i.e. test under evaluation) vs. the reference test reported in each study. The ordering of the columns is ++, +-, -+, --, where the first entry refers to the result of the test under evaluation and the second entry refers to the result of the reference test. }\r\n \\item{burn_in}{The number of early iterations that are to be dropped. The default value is 0. }\r\n \\item{iter.keep}{ Maximum number of iteration we want to keep. }\r\n \\item{Thin}{a single numeric value. It sets the numerical field used to select every thin-th iteration to contribute to the estimates being calculated. The default value is 1. }\r\n \\item{sub_rs}{a list that describes the partition of the reference standard among the studies, if any. See details for further explanations.}\r\n \\item{point_estimate}{a character string indicating which method is to be used to calculate the estimates. One of \"median\" (default) or \"mean\", can be used }\r\n \\item{path}{a character string pointing to the directory where the SUMMARY files are to be stored. }\r\n \\item{chain}{ A list of character strings pointing to each directory, one for each chain, where the files created during the Gibbs sampler process are stored.}\r\n \\item{tv}{a list of true parameter values. See details for further explanations }\r\n \\item{digit}{integer indicating the number of decimal places to be used. The default value is 6. }\r\n \\item{print_plot}{logical. If TRUE, pdf files of trace, density and summary receiver operating characteristic (SROC) curve plots are saved in the \\code{path} working directory to help assess convergence of the Gibbs sampler. }\r\n}\r\n\r\n\\details{\r\n\r\nThe first element of the list-object \\code{sub_rs} corresponds to the number of different reference standards. The default value is 1. The number of additional elements will depend on the value of the first element. There must be as many additional element in \\code{sub_rs} as there are different reference standards. Assuming the studies are labelled 1, ..., N, \r\neach of these additional elements must be a vector (possibly of length one) taking as their values the labelling of the corresponding studies sharing the same reference standard. For example, if we have 2 reference tests, the first one aplied over study 1-10 and the second one applied over study 11-15 then the \\code{sub_rs} list-argument should be of length 3 with the following elements : 3, 1:10, 11:15\r\n\r\n\r\nIf the argument \\code{tv} is equal to \\code{NULL}, the function assumes the data are coming from a real life example. Otherwise, it assumes the data are coming from a simulated dataset in which case the user must provide the \\dQuote{true parameters} that were used to simulate the data for the within-study and between-study parameters and for the reference standards through the \\code{tv} argument. 
\r\nThe within-study parameters must be a matrix-like object with each column being true values for \\eqn{\\alpha_i}{alpha_i}, \\eqn{\\theta_i}{theta_i}, sensitivity of test under evaluation \\eqn{S_{1i}}{S1_i}, specificity of test under evaluation \\eqn{C_{1i}}{C1_i} and prevalence \\eqn{\\pi_i}{pi_i}. \r\nThe between-study parameters must be a vector of the following true values : \\eqn{\\Theta}{THETA}, \\eqn{\\sigma_{\\theta}}{sigma_theta}, \\eqn{\\Lambda}{LAMBDA}, \\eqn{\\sigma_{\\alpha}}{sigma_alpha} and \\eqn{\\beta}{beta}. The reference stadard initial values must be a 2 X \\code{sub_rs[[1]]} matrix-like object. \r\nThe first row must be the true values of the sensitivity of the reference standard, while the second row must correspond to true values of the specificity of the reference standard. The ordering described above in the within-study, between-study and reference standard true parameters must be preserved otherwise an error message will be displayed. \r\n\r\nIf the argument \\code{print_plot} is equal to \\code{TRUE}, the function will create and save in the \\code{path} working directory 3 type of plots to help the user judge if the descriptive statistics are reliable. First, a trace plot for each parameter will be created to help evaluate whether the Gibbs sampler has converged. Each trace plot is a scatter plot of the posterior sample of a single parameter vs the iteration number of the Gibbs sampler. Second, a density plot for each parameter will also be created. It plots a smoothed posterior kernel density estimate for each parameter. Finally, a SROC curve plot will also be created by the function. It summarizes the performance of diagnostic tests by plotting the relationship between the true positive rate and the false positive rate of the tests, as the threshold used to distinguish disease cases from noncases varies. \r\n\r\n}\r\n\r\n\\value{\r\n\r\nA list of : Point estimates and \\eqn{95\\%} highest posterior density (HPD) intervals of the between-study parameters, within-study parameters and reference standard parameters. All estimates are obtained via a Gibbs sampler process.\r\n\r\nIt also prints in the \\code{path} working directory a text file with a more complete summary of the results listed above (i.e. the results also include stadard errors and MC errors plus the posterior predictive value for the sensitivity and specificity of a new study that has not yet taken place). \r\n\r\n}\r\n\r\n\r\n\\examples{\r\n\r\n#REAL-LIFE EXAMPLES\r\n#\r\n#PLEASE NOTE THAT BOTH EXAMPLES BELOW ASSUME THE EXISTANCE OF POSTERIOR SAMPLES OBTAINED FROM THE 'HSROC' FUNCTION. \r\n#IN OTHER WORDS' ONE NEEDS TO RUN THE 'HSROC' FUNCTION BEFORE USING THE 'HSROCSUmmary' FUNCTION.\r\n#\r\n#Example 1\r\n#To get descriptive statistics and graphical summaries for the MRI data \r\n#(Scheidler et al. 1997) after dropping the first 5,000 iterations.\r\n\r\ndata(MRI)\t#load the data\r\n\\dontrun{\r\nHSROCSummary(data = MRI, burn_in=5000, print_plot=TRUE )\r\n}\r\n\r\n\r\n#Example 2\r\n#To get descriptive statistics and graphical summaries for the In.house \r\n#data (Pai et al. 2004) coming from 2 different chains. \r\n#We provide the path to each chain's directory, i.e. the directory where \r\n#all files created during the Gibbs sampler process are stored for \r\n#each chain. Let's assume there are two fictional directoies \r\n#chain_path = list(\"C:/path_to_chain_1\", \"C:/path_to_chain_2\").\r\n#Let's assume we drop the first 5,000 iterations and we use a thinning \r\n#interval of 10. 
\r\n\r\n\r\ndata(In.house)\t#load the data\r\n\\dontrun{\r\nHSROCSummary(data = In.house, burn_in=5000, Thin=10, \r\n\t\tchain=chain_path, print_plot=TRUE )\r\n}\r\n\r\n\\dontshow{\r\n\r\nx <- rnorm(1000)\r\ny <- as.mcmc(x)\t\r\nz <- HPDinterval(y)\r\n\r\n}\r\n\r\n}\r\n\r\n\r\n\\references{ \r\n\r\nScheidler J, Hricak H, Yu KK, Subak L, Segal MR. \\emph{Radiological evaluation of lymph node metastases in patients with cervical cancer : a meta-analysis}. Journal of the American Medical Association 1997 ; 278(13):1096-1101. \r\n\r\nPai, M. et al. (2004) \\emph{Nucleic acid amplification in the diagnosis of tuberculous pleuritis: a systematic review and meta-analysis}. BMC Infect Dis 2004, 4:6.\r\n\r\n}\r\n\r\n\r\n\\keyword{models}\r\n\r\n" }, { "alpha_fraction": 0.663690447807312, "alphanum_fraction": 0.6651785969734192, "avg_line_length": 32.650001525878906, "blob_id": "4cb6ad4a0e53318967c53f8ea834a455f305904a", "content_id": "c3ea39cb9e5fd8bc0eee3993fc19339662cb5a58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 672, "license_type": "no_license", "max_line_length": 116, "num_lines": 20, "path": "/src/diagnostic_explain.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "from PyQt4.Qt import *\nimport forms.ui_diagnostic_explain_dlg\n\nfrom settings import update_setting\n\nclass DiagnosticExplain(QDialog, forms.ui_diagnostic_explain_dlg.Ui_diag_explain_window):\n \n def __init__(self, parent=None):\n super(DiagnosticExplain, self).__init__(parent)\n self.setupUi(self)\n\n QObject.connect(self.dont_show_again_chk_box, SIGNAL(\"stateChanged(int)\"), self.update_explain_diag_setting)\n\n def update_explain_diag_setting(self, state):\n field = \"explain_diag\"\n \n if state == Qt.Checked:\n update_setting(field, True)\n elif state == Qt.Unchecked:\n update_setting(field, False)" }, { "alpha_fraction": 0.5605738759040833, "alphanum_fraction": 0.5956771969795227, "avg_line_length": 44.225582122802734, "blob_id": "daa4865a435d90a79b0734f6ee7958eaeb4a6289", "content_id": "79c7bc17d00d8b4afd8e9fe09a370c19edebfb14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 58342, "license_type": "no_license", "max_line_length": 284, "num_lines": 1290, "path": "/src/R/multinomial_tmp/newfuns.r", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "library(BRugs) \n \nrun.multi <-\nfunction (Data, outcomes, miss.patterns, N, col.study = \"TRIAL\", col.treat = \"TREAT\", covs = NULL, model = \"baseline\", var.model = \"ind\",\n nChains = 3, conv.limit = 1.1, niters = 300000, nruns = 5000, setsize = 2000, dprior = list(\"norm\",0,1e-6),slopeprior = list(\"norm\",0,1e-6), \n Etaprior=list(\"norm\",0,0.25), varprior=list(\"prec\",\"wish\"),varprior.params = c(1,5), path = \"C:/dev/OpenMeta-analyst-/R/multinomial_tmp/multinomial/\")\n{\n# Data is name of R data frame with rows and all columns including incomplete ones\n# outcomes is set of columns with outcome data including summed columns\n# miss.patterns is list with missing data patterns\n# each element of list is separate missing data pattern with three subelements\n# first subelement is set of rows of dat corresponding to this missing data pattern\n# second subelement is indicator matrix where each row gives linear combination of columns of outcome data producing one multinomial element\n# third subelement is indicator matrix where each rows gives linear combination of full set of multinomial 
probabilities corresponding to collapsed set of probabilities\n# col.study is column with trial labels\n# col.treat is column with treatment labels\n# covs is column number(s) of covariates in meta-regression model; if none given, it is NULL\n# model is either \"baseline\" for baseline logit models or \"constant\" for model that compares each category to all others\n# var.model is \"homogeneous\",\"independence\", or \"consistent\" (see Lu and Ades, 2009)\n# nChains is number of MCMC chains to run\n# conv.limit is limit for BGR convergence\n# niters is number of MCMC iterations to run\n# nruns is number of MCMC iterations to save\n# setsize is number of MCMC iterations after which convergence diagnostics should be checked\n# dprior is prior for treatment effects as 3-list (distribution, mean, precision)\n# slopeprior is prior for slope as 3-list (distribution, mean, precision)\n# Etaprior is prior for random study effects as 3-list (distribution, mean, precision)\n# varprior is 2-list of parameter and distribution upon which prior for variance is put \n# choices for parameter: precision, variance, sd\n# choices for distribution: gamma, Wishart, uniform\n# varprior.params is set of parameters of variance distribution\n# path is directory where data are stored\n#\n if (missing(miss.patterns)) {\n\t\t\tarms <- seq(dim(Data)[1])\n#\t\t\t miss.patterns = NULL\n\t\t\tncat = length(outcomes)\n\t\t\tmiss.patterns = list(list(list(seq(arms),seq(ncat))) ,diag(ncat))\n }\n\t else {\n arms <- vector(\"integer\")\n\t\t\tncat <- dim(miss.patterns[[2]])[2] # Number of outcome categories\n for (i in seq(length(miss.patterns[[1]])))\n arms <- c(arms,miss.patterns[[1]][[i]][[1]])\n }\n narm <- length(arms)\n dat <- Data[arms,] # Reorder data in order of missing data patterns\n N <- N[arms]\n\t\tstudy <- sort.vec(dat[, col.study])\n\t\ttreat <- sort.vec(dat[, col.treat])\n nstudy <- length(unique(study))\n ntreat <- length(unique(treat))\n ref <- 1 #Reference treatment\n base <- make.base.treat(dat,col.study,col.treat,narm)\n b.id <- base$b.id\n base.treat <- base$base.treat\n D <- Data[, outcomes]\t#Outcomes for studies\n\t\tarms.by.study <- rle(dat[,col.study])$lengths #Number of arms in studies\n nitems <- dim(D)[1] # number of complete data rows\n if(is.null(covs)) {\n x <- NULL\n pars.to.save <- c(\"d\", \"Eta\", \"Delta\", \"Prec\", \"P\", \"dev\")\n }\n else {\n x <- as.matrix(dat[seq(nitems), covs])\n pars.to.save <- c(\"d\", \"slope\", \"Eta\", \"Delta\", \"Prec\", \"P\", \"dev\")\n }\n\t\tprior <- multi.prior(ncat,covs,dprior,Etaprior,slopeprior,varprior,varprior.params)\n multi.inits(D, x, treat, study, nChains, narm, ntreat, ncat, nstudy, arms.by.study, b.id, model,var.model, miss.patterns,N, path)\n multi.data(Data, covs, outcomes, miss.patterns, N, prior, narm, ntreat, ncat, nstudy, study, treat, base.treat, path)\n\t\tmulti.model(dat,miss.patterns,narm, ncat, model, prior,covs, var.model, varprior, path)\n bugs.out <- multi.bugs.fit(\"model.txt\", \"data.txt\", nChains, paste(\"inits\", seq(nChains), \".txt\", sep = \"\"), path, pars.to.save, setsize, conv.limit, niters, nruns, covs)\n burn.in <- bugs.out[[1]]\n no.runs <- bugs.out[[2]]\n stats <- bugs.out[[3]]\n#\t\tdic <- bugs.out[[4]]\n samples <- bugs.out[[4]]\n varnames <- dimnames(samples)[[3]]\n nvars <- dim(samples)[3]\n d.vars <- grep(\"d\", varnames)\n d <- aperm(array(matrix(samples[, , d.vars], c(no.runs * nChains, length(d.vars))), c(no.runs * nChains, ncat - 1, ntreat - 1)), c(1, 3, 2))\n P.vars <- grep(\"P\", varnames)\n P <- 
aperm(array(matrix(samples[, , P.vars], c(no.runs * nChains, length(P.vars))), c(no.runs * nChains, ncat, narm)), c(1, 3, 2))\n Prec.vars <- grep(\"Prec\", varnames)\n Prec <- aperm(array(matrix(samples[, , Prec.vars], c(no.runs * nChains, length(Prec.vars))), \n c(no.runs * nChains, ncat - 1, ncat - 1, ntreat - 1, ntreat - 1)), c(1, 5, 4, 3, 2))\n#\t\tdev.vars = grep(\"dev\", varnames)\n#\t\tdev <- aperm(array(matrix(samples[, , dev.vars], c(no.runs * nChains, length(dev.vars))), c(no.runs * nChains, ncat, narm)), c(1, 3, 2))\n if (!is.null(covs)) {\n \tslope.vars <- grep(\"slope\", varnames)\n \tslope <- aperm(array(matrix(samples[, , slope.vars], c(no.runs * nChains, length(slope.vars))), c(no.runs * nChains, ncat - 1, ntreat - 1)), c(1, 3, 2))\n }\n if (is.null(covs)) {\n#\t\t\tout <- list(burn.in, no.runs, stats, dic, d, Prec, P, samples)\n#\t\t\tnames(out) <- c(\"Burn In\", \"Number runs per chain\", \"Stats\",\"DIC Stats\",\"d\",\"Prec\",\"P\", \"MCMC Samples\")\n\t\t\tout <- list(burn.in, no.runs, stats, d, Prec, P, D, N, ncat, miss.patterns, samples)\n\t\t\tnames(out) <- c(\"Burn In\", \"Number runs per chain\", \"Stats\",\"d\",\"Prec\",\"P\", \"Data\", \"N\", \"No.Categories\", \"Missing.Data.Patterns\", \"MCMC Samples\")\n\t\t}\n\t\telse {\n\t\t\tout <- list(burn.in, no.runs, stats, d, slope, Prec, P, D, N, ncat, miss.patterns, samples)\n\t\t\tnames(out) <- c(\"Burn In\", \"Number runs per chain\", \"Stats\",\"d\",\"Slopes\",\"Prec\",\"P\", \"Data\", \"N\", \"No.Categories\", \"Missing.Data.Patterns\", \"MCMC Samples\")\n\t\t}\n return(out)\n}\n\nmake.base.treat <-\nfunction (dat,col.study,col.treat,narm)\n{\n# Create base treatment vector and base treat id vector\n study.lengths <- rle(dat[,col.study])$lengths\n nstudy <- length(study.lengths)\n ends <- cumsum(study.lengths) # End row of trials\n starts <- c(1, ends[-length(ends)] + 1) # Start row of trials\n b.treat <- rep(NA, nstudy)\n b.id <- rep(F, narm)\n for (i in 1:nstudy) {\n limits <- starts[i]:ends[i] # Start and end rows of study i\n b.treat[i] <- min(dat[limits, col.treat]) # Base treatment (lowest numbered) for study i\n b.id[limits[b.treat[i] == dat[limits, col.treat]]] <- T # True if arm is base treatment\n }\n base.treat <- rep(b.treat, each = study.lengths) # Vector of base treat by study replicated within study\n out <- list(b.id,base.treat)\n names(out) <- c(\"b.id\",\"base.treat\")\n return(out)\n}\n\n\nmulti.prior<-\nfunction (ncat,covs,dprior,Etaprior,slopeprior,varprior,varprior.params) \n{\n\tif (ncat > 2) {\n\t\tmean.d = rep(dprior[[2]],ncat-1)\n\t\tPrec.d = diag(dprior[[3]],ncat-1,ncat-1)\n\t\tPrior.d <- paste(\"dm\",dprior[[1]],\"(mean.d[], Prec.d[,])\",sep=\"\")\n\t\tmean.Eta = rep(Etaprior[[2]],ncat-1)\n\t\tPrec.Eta = diag(Etaprior[[3]],ncat-1,ncat-1)\n\t\tPrior.Eta <- paste(\"dm\",Etaprior[[1]],\"(mean.Eta[],Prec.Eta[,])\",sep=\"\")\n\t\tif (!is.null(covs)) {\n\t\t\tmean.slope = rep(slopeprior[[2]],ncat-1)\n\t\t\tPrec.slope = dag(slopeprior[[3]],ncat-1,ncat-1)\n\t\t\tPrior.slope <- paste(\"dm\",slopeprior[[1]],\"(mean.slope[],Prec.slope[,])\",sep=\"\")\n\t\t}\n\t\t\n\t\tif (varprior[[1]] == \"prec\") {\n\t\t\tif (varprior[[2]] == \"wish\") {\n\t\t\t\tPrec.1 = diag(varprior.params[1],ncat-1)\n\t\t\t\tPrec.2 = varprior.params[2]\n\t\t\t\tPrior.Prec <- paste(\"d\",varprior[[2]],\"(Prec.1[,], Prec.2)\",sep=\"\")\n\t\t\t}\n\t\t\telse return(\"Only Wishart currently supported for multinomial precision distribution\")\n\t\t}\n\t}\n\telse {\n\t\tmean.d = dprior[[2]]\n\t\tPrec.d = dprior[[3]]\n\t\tPrior.d 
<- paste(\"d\",dprior[[1]],\"(mean.d,Prec.d)\",sep=\"\")\n\t\tmean.Eta = Etaprior[[2]]\n\t\tPrec.Eta = Etaprior[[3]]\n\t\tPrior.Eta <- paste(\"d\",Etaprior[[1]],\"(mean.Eta,Prec.Eta)\",sep=\"\")\n\t\tif (!is.null(covs)) {\n\t\t\tmean.slope = slopeprior[[2]]\n\t\t\tPrec.slope = slopeprior[[3]]\n\t\t\tPrior.slope <- paste(\"d\",slopeprior[[1]],\"(mean.slope,Prec.slope)\",sep=\"\")\n\t\t}\n\t\t\n\t\tif (varprior[[1]] == \"prec\") {\n\t\t\tif (varprior[[2]] == \"gamma\") {\n\t\t\t\tPrec.1 = varprior.params[1]\n\t\t\t\tPrec.2 = varprior.params[2]\n\t\t\t\tPrior.Prec <- paste(\"d\",varprior[[2]],\"(Prec.1, Prec.2)\",sep=\"\")\n\t\t\t}\n\t\t\telse return(\"Only Gamma currently supported for precision distribution\")\n\t\t}\n\t\telse if (varprior[[1]] == \"sd\") {\n\t\t\tif (varprior[[2]] == \"unif\") {\n\t\t\t\tPrec.1 = varprior.params[1]\n\t\t\t\tPrec.2 = varprior.params[2]\n\t\t\t\tPrior.Prec <- paste(\"d\",varprior[[2]],\"(Prec.1, Prec.2)\",sep=\"\")\n\t\t\t}\n\t\t\telse return(\"Only Uniform currently supported for sd distribution\")\n\t\t}\n\t}\n#\t Prec.1 <- 1\n#\t Prec.d <- 0.000001\n#\t Prec.slope <- 0.000001\n#\t Prec.2 <- 1\n#\t Prec.Eta <- .25\n#\t}\n\n if (is.null(covs)) {\n\t out <- list(mean.d, mean.Eta, Prec.1, Prec.2, Prec.d, Prec.Eta, Prior.Prec, Prior.d, Prior.Eta)\n\t names(out) <- c(\"mean.d\", \"mean.Eta\", \"Prec.1\", \"Prec.2\", \"Prec.d\", \"Prec.Eta\", \"Prior.Prec\", \"Prior.d\", \"Prior.Eta\")\n }\n else {\n\t out <- list(mean.slope, mean.d, mean.Eta, Prec.1, Prec.2, Prec.d, Prec.Eta, Prec.slope,\n\t\t\t\t Prior.Prec, Prior.d, Prior.Eta, Prior.slope)\n\t names(out) <- c(\"mean.slope\", \"mean.d\", \"mean.Eta\", \"Prec.1\", \"Prec.2\", \"Prec.d\", \"Prec.Eta\", \"Prec.slope\", \n\t\t\t\t\t \"Prior.Prec\", \"Prior.d\", \"Prior.Eta\", \"Prior.slope\")\n }\n return(out)\n}\n\nmulti.inits <-\nfunction (D, x, treat, study, nChains, narm, ntreat, ncat, nstudy, arms.by.study, b.id, model,var.model, miss.patterns, N, path) \n{\n# Computes initial values for baseline category logits model without covariates\n# Inputs:\n# D matrix of data outcomes\n# x matrix of predictors\n# treat vector of treatment labels\n# study vector of study labels\n# nChains number of MCMC chains\n# narm number of study arms\n# ntreat number of treatments\n# ncat number of outcome categories\n# nstudy number of studies\n# arms.by.study number of arms in each study\n# b.id indicator for base treatment in arms\n# model \"baseline\" if baseline category logits model (alternative is logit model)\n# var.model model for variance (\"ind\",\"hom\",\"cons\")\n# miss.patterns missing data patterns\n# N number observations by study\n# path path to save initial value file\n\tif (length(miss.patterns[[1]])==1)\n\t\tDimputed = D\n\telse\n\t\tDimputed = multi.impute.data(D,miss.patterns,ncat,N)\n \n\tif (model == \"baseline\") {\n\t\tlogits <- as.matrix(log(Dimputed[, -1]) - log(Dimputed[, 1]))\n\t\tse.logits <- as.matrix(sqrt(1/Dimputed[, -1] + 1/Dimputed[, 1]))\n\t\tEta <- se.Eta <- matrix(NA, nstudy, ncat)\n\t\tEta[, 2:ncat] <- logits[b.id, ]\n\t\tse.Eta[, 2:ncat] <- se.logits[b.id, ]\n\t\tdelta <- logits - apply(as.matrix(Eta[, -1]), 2, rep, times = arms.by.study)\n\t}\n\telse if (model == \"logit\") {\n\t\tN <- apply(Dimputed, 1, sum)\n\t\tlogits <- as.matrix(log(Dimputed/(N-Dimputed)))\n\t\tse.logits <- as.matrix(sqrt(1/Dimputed + 1/(N-Dimputed)))\n\t\tEta <- se.Eta <- matrix(NA, nstudy, ncat-1)\n\t\tEta <- as.matrix(logits[b.id,-1])\n\t\tse.Eta <- as.matrix(se.logits[b.id,-1])\n\t\tdelta <- logits[,-1] - 
apply(Eta,2,rep,times=arms.by.study)\n\t}\n\td <- se.d <- matrix(NA, length(unique(treat)), ncat-1)\n\trows.of.basetreat <- seq(dim(Dimputed)[1])*as.numeric(b.id)\n\tdelta <- delta[-rows.of.basetreat,,drop=F] # Eliminate base treatment arms\n\tbase.tx <- treat[b.id] # base treatment for N studies\n\tend.study <- c(0, cumsum(arms.by.study)) # end row number of each trial\n\trows <- end.study - seq(0, nstudy) # end number of each trial not including base treatment arms\n\tdesign.mat <- matrix(0, narm-nstudy, ntreat)\n\tfor (i in seq(nstudy)) {\n\t\tstudytx <- treat[(end.study[i]+1):end.study[i+1]] #treatments in ith study\n\t\tnonbase.tx <- studytx[studytx!=base.tx[i]] #non-baseline treatments for ith study\n\t\tdesign.mat[(1+rows[i]):rows[i+1],base.tx[i]] <- -1\n\t\tfor (j in seq(length(nonbase.tx)))\n\t\t\tdesign.mat[j+rows[i],nonbase.tx[j]] <- 1\t\n\t}\n\tdesign.mat <- design.mat[,-1,drop=F]\n\tunique.treat = sort(unique(treat))\n\tif (!is.null(x)) {\n\t\tncov <- dim(x)[2]\n\t\tslope <- se.slope <- array(NA, c(length(unique(treat)), ncov, ncat-1))\n\t}\n\tfor (k in 1:(ncat-1)) {\n\t\tif (is.null(x)) {\n\t\t\tfit <- summary(lm(delta[, k] ~ design.mat - 1))\n\t\t}\n\t\telse {\n\t\t\tx.cen <- x[-rows.of.basetreat,,drop=F]\n\t\t\tx.cen <- x.cen - apply(x.cen,2,mean)\n\t\t\tfit <- summary(lm(delta[,k]~ design.mat + design.mat:x.cen-1))\n\t\t\tslope[treat[-1],,k] <- coef(fit)[ntreat-1 + seq((ntreat-1)*ncov),1]\n\t\t\tif (!is.nan(fit$fstat[1])) se.slope[treat[-1],,k] <- coef(fit)[ntreat-1 + seq((ntreat-1)*ncov),2]\n\t\t\telse se.slope[treat[-1],,k] <- 1\n\t\t}\n\t\td[unique.treat[-1],k] <- coef(fit)[1:(ntreat-1), 1]\n\t\tif (!is.nan(fit$fstat[1])) {\n\t\t\tse.d[unique.treat[-1],k] <- coef(fit)[1:(ntreat-1), 2]\n\t\t\ttau <- fit$sigma^2\n\t\t}\n\t\telse {\n\t\t\tse.d[unique.treat[-1],k] <- 1\n\t\t\ttau = 1\n\t\t}\n\t\td[1,k] <- se.d[1,k] <- NA\n\t\tif (!is.null(x)) \n\t\t\tslope[1,,k] <- se.slope[1,,k] <- NA\n\t}\n\tif (model == \"baseline\") {\n\t\tDelta <- matrix(NA, narm, ncat)\n\t\tDelta[b.id,2:ncat] <- 0\n\t\tDelta[seq(narm)[!b.id],2:ncat] <- delta\n\t}\n\telse if (model == \"logit\") {\n\t\tDelta <- matrix(NA, narm, ncat-1)\n\t\tDelta[b.id,] <- 0\n\t\tDelta[seq(narm)[!b.id],] <- delta\n\t}\n\trandom.Eta1 <- matrix(rnorm(dim(Eta)[1]*dim(Eta)[2]),dim(Eta)[1],dim(Eta)[2])\n\trandom.Eta2 <- matrix(rnorm(dim(Eta)[1]*dim(Eta)[2]),dim(Eta)[1],dim(Eta)[2])\n\trandom.d1 <- matrix(rnorm(dim(d)[1]*dim(d)[2]),dim(d)[1],dim(d)[2])\n\trandom.d2 <- matrix(rnorm(dim(d)[1]*dim(d)[2]),dim(d)[1],dim(d)[2])\n\tif (!is.null(x)) {\n\t\trandom.slope1 <- matrix(rnorm(dim(slope)[1]*dim(slope)[2]),dim(slope)[1],dim(slope)[2])\n\t\trandom.slope2 <- matrix(rnorm(dim(slope)[1]*dim(slope)[2]),dim(slope)[1],dim(slope)[2])\n\t}\n\tdimD1 <- dim(Dimputed)[1]\n\tif (var.model == \"ind\") {\n\t\tPrec <- multi.inits.ind(ncat,ntreat,tau)\n\t\tif (ncat > 2) {\n\t\t\trandom.ISigma1 <- array(rchisq(dim(Prec)[1]*dim(Prec)[2]*dim(Prec)[3]*dim(Prec)[4],dimD1-1),c(dim(Prec)[1],dim(Prec)[2],dim(Prec)[3],dim(Prec)[4]))\n\t\t\trandom.ISigma2 <- array(rchisq(dim(Prec)[1]*dim(Prec)[2]*dim(Prec)[3]*dim(Prec)[4],dimD1-1),c(dim(Prec)[1],dim(Prec)[2],dim(Prec)[3],dim(Prec)[4]))\n\t\t}\n\t\telse {\n\t\t\trandom.ISigma1 <- array(rchisq(dim(Prec)[1]*dim(Prec)[2],dimD1-1),c(dim(Prec)[1],dim(Prec)[2]))\n\t\t\trandom.ISigma2 <- array(rchisq(dim(Prec)[1]*dim(Prec)[2],dimD1-1),c(dim(Prec)[1],dim(Prec)[2]))\n\t\t}\n\t}\n\telse if (var.model == \"hom\") {\n\t\tPrec <- multi.inits.hom(ncat,ntreat,tau)\n\t\trandom.ISigma1 <- 
rchisq(1,dimD1-1)\n\t\trandom.ISigma2 <- rchisq(1,dimD1-1)\n\t}\n\telse if (var.model == \"consis\") Prec <- multi.inits.ind(ncat,ntreat,tau)\n\n\t\n\t\n\tif (is.null(x)) {\n\t\tinits.1 <- list(Eta = Eta, d = d, Delta = Delta, Prec = Prec)\n#\t\tinits.2 <- list(Eta = Eta + se.Eta/2, d = d, Delta = Delta, Prec = Prec * (dimD1 - 1)/qchisq(0.975, dimD1 - 1))\n#\t\tinits.3 <- list(Eta = Eta - se.Eta/2, d = d - se.d/2, Delta = Delta, Prec = Prec * (dimD1 - 1)/qchisq(0.025, dimD1 - 1))\n\t\tinits.2 <- list(Eta = Eta + se.Eta*random.Eta1, d = d + se.d*random.d1, Delta = Delta, Prec = Prec * (dimD1 - 1)/random.ISigma1)\n\t\tinits.3 <- list(Eta = Eta + se.Eta*random.Eta2, d = d + se.d*random.d2, Delta = Delta, Prec = Prec * (dimD1 - 1)/random.ISigma2)\n\t}\n\telse {\n\t\tinits.1 <- list(Eta = Eta, d = d, Delta = Delta, slope = slope, Prec = Prec)\n#\t\tinits.2 <- list(Eta = Eta + se.Eta/2, d = d, Delta = Delta, slope = slope, Prec = Prec*(dimD1 - 1)/qchisq(0.975, dimD1 - 1))\n#\t\tinits.3 <- list(Eta = Eta - se.Eta/2, d = d - se.d/2, Delta = Delta, slope = slope - se.slope/2, Prec = Prec*(dimD1 - 1)/qchisq(0.025, dimD1 - 1))\n\t\tinits.2 <- list(Eta = Eta + se.Eta*random.Eta1, d = d + se.d*random.d1, Delta = Delta, slope = slope + se.slope*random.slope1, Prec = Prec*(dimD1 - 1)/random.ISigma1)\n\t\tinits.3 <- list(Eta = Eta + se.Eta*random.Eta2, d = d + se.d*random.d2, Delta = Delta, slope = slope + se.slope*random.slope2, Prec = Prec*(dimD1 - 1)/random.ISigma2)\n\t}\n\tinInits <- list(inits.1,inits.2,inits.3)\n\tinits <- c(\"inits1.txt\", \"inits2.txt\", \"inits3.txt\")\n bugsInits(inInits, nChains, paste(path, inits, sep = \"\"))\n}\n\n\nmulti.impute.data <- function(D, miss.patterns, ncat, N, probs.to.impute)\n{\n\tD = as.matrix(D)\n\tDimputed = matrix(NA,dim(D)[1],ncat)\n\tcount = 0\n\tif(missing(probs.to.impute)) imputed.prop = rep(1/ncat,ncat)\n\telse imputed.prop = probs.to.impute\n\tfor (i in seq(length(miss.patterns[[1]]))) {\n\t\trows = miss.patterns[[1]][[i]][[1]] #data rows in missing data pattern\n\t\tcols.data = miss.patterns[[1]][[i]][[2]] #data columns in missing data pattern\n\t\tis.complete.cols = cols.data %in% seq(ncat) #which data columns are complete\n\t\tif (any(is.complete.cols)) {\n\t\t\tcomplete.cols = cols.data[is.complete.cols] #col no. of complete cols\n\t\t\tincomplete.cols = cols.data[!is.complete.cols] #col nos. of incomplete cols\n\t\t\tDimputed[rows, complete.cols] = D[rows, complete.cols] #Put in complete data\n\t\t}\n\t\telse\n\t\t\tincomplete.cols = cols.data\n\t\tif (!all(is.complete.cols)) { #If some columns with missing data\n\t\t\tpmat = miss.patterns[[2]][incomplete.cols,,drop=F] #Parameters corresponding to incomplete cols\n\t\t\tif (any(is.complete.cols)) {\n\t\t\t\tsums.to.split = D[rows, incomplete.cols, drop=F] - D[rows, complete.cols, drop=F]%*%t(pmat[, complete.cols,drop=F])\n\t\t\t\tpmat[,complete.cols] = 0\n\t\t\t\timputed.prop[complete.cols] = 0\n\t\t\t}\n\t\t\telse\n\t\t\t\tsums.to.split = D[rows, incomplete.cols, drop=F]\n\t\t\timputed.prop = imputed.prop/sum(imputed.prop)\n#\t\t\tno.to.split = D[rows, incomplete.cols,drop=F] - apply(D[rows,complete.cols,drop=F],1,sum,na.rm=T)\n#\t\t\tno.summands = apply(parsmat,1,sum) #no. 
parameters in each linear combination\n#\t\t\texpand.summands = matrix(rep(no.summands,each=length(rows)),ncol=dim(parsmat)[1])\n#\t\t\tno.to.split=round((D[rows, incomplete.cols,drop=F] - apply(D[rows,complete.cols,drop=F],1,sum,na.rm=T)) / expand.summands, 0)#Split combination sums equally\n\t\t\tfor (j in seq(length(rows))) {\n\t\t\t\tx0 = matrix(rep(sums.to.split [j,], each=ncat),ncol=length(incomplete.cols))*t(pmat)\n\t\t\t\tx1 = imputed.prop*t(pmat)\n\t\t\t\tx2 = x0*x1/rep(apply(x1,2,sum),each=ncat,ncol=dim(pmat)[1])\n#\t\t\t\tx1 = matrix(rep(no.to.split[j,], each=ncat), ncol=length(incomplete.cols))\n#\t\t\t\tx2 = x1*t(pmat) # zero out the unneeded parameters\n#\t\t\t\tif (any(is.complete.cols)) x2[complete.cols,] = 0 # zero out the rows with complete data\n\t\t\t\tx2[x2==0] = NA\n\t\t\t\tx3 = apply(x2, 1, mean, na.rm=T) # average across potential imputed values\n\t\t\t\tx5 = (N[rows[j]]- sum(Dimputed[rows[j],], na.rm=T))/sum(x3, na.rm=T) #Factor to adjust imputations\n\t\t\t\tx6 = round(x3*x5) # Apply factor to imputations\n\t\t\t\tif (any(is.complete.cols))\n\t\t\t\t\tDimputed[rows[j],seq(ncat)[-complete.cols]] = x6[!is.na(x6)]\n\t\t\t\telse\n\t\t\t\t\tDimputed[rows[j],seq(ncat)] = x6[!is.na(x6)]\n\t\t\t\tDimputed[rows[j],1] = Dimputed[rows[j],1] + N[rows[j]] - sum(Dimputed[rows[j],]) #Correction for rounding so totals add\n\t\t\t}\n\t\t}\n\t\tif (missing(probs.to.impute)) {\n\t\t\trunning.total = apply(Dimputed,2,sum,na.rm=T)\n\t\t\timputed.prop = running.total/sum(running.total) # Proportion of events in each category\n\t\t}\n\t\telse\n\t\t\timputed.prop = probs.to.impute\n\t}\n\treturn(Dimputed)\n}\n\n\nmulti.inits.ind <- function(ncat,ntreat,tau)\n{\n\tif (ncat > 2) \n\t\tPrec <- array(NA, c(ntreat - 1, ntreat, ncat - 1, ncat - 1))\n\telse \n\t\tPrec <- array(NA, c(ntreat - 1, ntreat))\n\tfor (i in 1:(ntreat - 1)) {\n\t\tfor (j in (i + 1):ntreat) {\n\t\t\tif (ncat > 2) \n\t\t\t\tPrec[i, j, , ] <- (1/tau) * diag(ncat - 1)\n\t\t\telse \n\t\t\t\tPrec[i, j] <- 1/tau\n\t\t}\n\t}\n\treturn(Prec)\n}\n\nmulti.inits.hom <- function(ncat,ntreat,tau)\n{\n\tif (ncat > 2) \n\t\tPrec <- (1/tau) * diag(ncat - 1)\n\telse \n\t\tPrec <- 1/tau\n\treturn(Prec)\n}\n\nmulti.data <-\nfunction (dat, covs, outcomes, miss.patterns, N, prior, narm, ntreat, ncat, nstudy, study, treat, base.treat, path) \n{\n if (length(miss.patterns[[1]])==1)\n\t\t\tD = as.matrix(dat[,outcomes])\n else {\n\t\t\tD <- matrix(NA, narm, ncat)\n\t\t\tcount <- 0\n\t\t\tfor (i in seq(length(miss.patterns[[1]]))) {\n\t\t\t\trows <- miss.patterns[[1]][[i]][[1]] # rows in ith pattern\n\t\t\t\tncombs <- length(miss.patterns[[1]][[i]][[2]]) #number of parameter combinations in ith pattern\n\t\t\t\tD[count + seq(length(rows)), seq(ncombs)] <- as.matrix(dat[rows, outcomes[miss.patterns[[1]][[i]][[2]]]]) # full data for ith pattern\n\t\t\t\tcount <- count + length(rows) # advance count to next set of rows\n }\n }\n corr.factor <- N/apply(D,1,sum,na.rm=T) # correction factor for multinomial probabilities to account for duplicate sums\n if (is.null(covs)) {\n \tif (length(miss.patterns[[1]])==1)\n inData <- list(narm = narm, ntreat = ntreat, ncat = ncat,nstudy = nstudy,\n study = study, base.treat = base.treat, treat = treat, D = D, \n Prec.1 = prior$Prec.1, Prec.2 = prior$Prec.2, Prec.d = prior$Prec.d, Prec.Eta = prior$Prec.Eta, \n mean.d = prior$mean.d, mean.Eta = prior$mean.Eta)\n else\n\t\t\t\tinData <- list(narm = narm, ntreat = ntreat, ncat = ncat,nstudy = nstudy,\n study = study, base.treat = base.treat, treat = treat, D = D, \n 
Prec.1 = prior$Prec.1, Prec.2 = prior$Prec.2, Prec.d = prior$Prec.d, Prec.Eta = prior$Prec.Eta, \n mean.d = prior$mean.d, mean.Eta = prior$mean.Eta,corr.factor=corr.factor)\n \n }\n else {\n \tx <- as.matrix(dat[, covs])\n \tncov <- dim(x)[2]\n if (length(miss.patterns[[1]])==1)\n inData <- list(narm = narm, ntreat = ntreat, ncat = ncat, nstudy = nstudy, ncov=ncov,\n study = study, base.treat = base.treat, treat = treat, D = D, x = x, \n Prec.1 = prior$Prec.1, Prec.2 = prior$Prec.2, Prec.d = prior$Prec.d, Prec.Eta = prior$Prec.Eta, Prec.slope=prior$Prec.slope,\n mean.d = prior$mean.d, mean.Eta = prior$mean.Eta, mean.slope = prior$mean.slope)\n else\n \tinData <- list(narm = narm, ntreat = ntreat, ncat = ncat, nstudy = nstudy, ncov=ncov,\n study = study, base.treat = base.treat, treat = treat, D = D, x = x, \n Prec.1 = prior$Prec.1, Prec.2 = prior$Prec.2, Prec.d = prior$Prec.d, Prec.Eta = prior$Prec.Eta, Prec.slope=prior$Prec.slope,\n mean.d = prior$mean.d, mean.Eta = prior$mean.Eta, mean.slope = prior$mean.slope, corr.factor=corr.factor)\n }\n bugsData(inData, paste(path, \"data.txt\", sep = \"\"))\n}\n\n\n\n\n\nmulti.model <-\nfunction (dat,miss.patterns,narm, ncat, model, prior,covs, var.model, varprior, path) \n{\n# Inputs \n# dat dataset (used if complete data)\n# miss.patterns # missing data patterns\n# ncat # outcome categories\n# model baseline or constant\n#\t\t var.model\t\t\t homogeneous, independent, consistent\n\tcat(\"model\\n{\\n\",file=paste(path,\"model.txt\",sep=\"\"))\n#\tif (is.null(miss.patterns)) multi.model.completedata(dat,narm,ncat,path)\n#\telse \n#\tmulti.model.missingdata(miss.patterns, ncat,path)\n\tif (length(miss.patterns[[1]])==1) complete = T\n\telse complete = F\n\tmiss.patterns.nobs <- miss.patterns.nprobs <- integer(length(miss.patterns[[1]]))\n\tfor (i in seq(length(miss.patterns[[1]]))) {\n\t\tmiss.patterns.nprobs[i] <- length(miss.patterns[[1]][[i]][[2]]) #number of parameter combinations in ith pattern\n\t\tmiss.patterns.nobs[i] <- length(miss.patterns[[1]][[i]][[1]]) # number of studies in ith pattern\n\t}\n \n\tfor (i in seq(length(miss.patterns[[1]]))) {\n\t\tcat(\"\\n for (i in 1:\", miss.patterns.nobs[i],\") {\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\tif (i==1) {\n\t\t\tcat(\" D[i,1:\", miss.patterns.nprobs[i], \"] ~ dmulti(P\",i,\"[i,],N\",i,\"[i])\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\tcat(\" N\",i,\"[i] <- sum(D[i,1:\", miss.patterns.nprobs[i], \"])\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t}\n\t\telse {\n\t\t\tcat(\" D[i+\", cumsum(miss.patterns.nobs)[i-1], \",1:\", miss.patterns.nprobs[i], \"] ~ dmulti(P\",i,\"[i,],N\",i,\"[i])\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\tcat(\" N\",i,\"[i] <- sum(D[i+\", cumsum(miss.patterns.nobs)[i-1], \",1:\", miss.patterns.nprobs[i], \"])\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t}\n\t\tfor (j in seq(miss.patterns.nprobs[i]-1)) {\n\t\t\tcombo <- miss.patterns[[2]][miss.patterns[[1]][[i]][[2]][j],]\n\t\t\tpos <- seq(ncat)[as.logical(abs(combo))]\n\t\t\tsigns <- ifelse(combo[pos]==1,\"+\",\"-\") \n\t\t\tif (i==1) {\n\t\t\t\tif (length(pos)>1) {\n\t\t\t\t\tstr <- paste(signs[-1],\"P[i,\",pos[-1],\"]\",sep=\"\")\n\t\t\t\t\tif (complete) \n\t\t\t\t\t\tcat(\" P\",i,\"[i,\", j, \"] <- (P[i,\",pos[1],\"]\",str,\")\\n\",file = paste(path,\"model.txt\",sep=\"\"), append = T, sep=\"\")\n\t\t\t\t\telse\n\t\t\t\t\t\tcat(\" P\",i,\"[i,\", j, \"] <- 
(P[i,\",pos[1],\"]\",str,\")*corr.factor[i]\\n\",file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t\tif (complete)\n\t\t\t\t\t\tcat(\" P\",i,\"[i,\", j, \"] <- P[i,\",pos[1],\"]\\n\",file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\t\t\telse\n\t\t\t\t\t\tcat(\" P\",i,\"[i,\", j, \"] <- P[i,\",pos[1],\"]*corr.factor[i]\\n\",file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\t}\n\t\t\telse {\n\t\t\t\tif (length(pos)>1) {\n\t\t\t\t\tstr <- paste(signs[-1],\"P[i+\", cumsum(miss.patterns.nobs)[i-1],\",\",pos[-1],\"]\",sep=\"\")\n\t\t\t\t\tif (complete)\n\t\t\t\t\t\tcat(\" P\",i,\"[i,\", j, \"] <- (P[i+\", cumsum(miss.patterns.nobs)[i-1],\",\",pos[1],\"]\",str,\")\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\t\t\telse\n\t\t\t\t\t\tcat(\" P\",i,\"[i,\", j, \"] <- (P[i+\", cumsum(miss.patterns.nobs)[i-1],\",\",pos[1],\"]\",str,\")*corr.factor[i+\",cumsum(miss.patterns.nobs)[i-1],\"]\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t\tif (complete)\n\t\t\t\t\t\tcat(\" P\",i,\"[i,\", j, \"] <- (P[i+\", cumsum(miss.patterns.nobs)[i-1],\",\",pos[1],\"])\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\t\t\telse\n\t\t\t\t\t\tcat(\" P\",i,\"[i,\", j, \"] <- (P[i+\", cumsum(miss.patterns.nobs)[i-1],\",\",pos[1],\"])*corr.factor[i+\",cumsum(miss.patterns.nobs)[i-1],\"]\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\t} \n\t\t}\n\t\tcat(\" P\",i,\"[i,\", miss.patterns.nprobs[i],\"] <- 1 - sum(P\",i,\"[i,1:\",miss.patterns.nprobs[i]-1,\"])\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\tcat(\"for (k in 1:\", miss.patterns.nprobs[i],\") {\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\t\tif (i==1) {\n\t\t\t\t\tcat(\"Dhat[i,k] <- P\",i,\"[i,k]*N\",i,\"[i]\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\t\t\tcat(\"dev[i,k] <- 2*D[i,k]*(log(D[i,k])-log(Dhat[i,k]))\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tcat(\"Dhat[i+\", cumsum(miss.patterns.nobs)[i-1],\",k] <- P\",i,\"[i,k]*N\",i,\"[i]\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\t\t\tcat(\"dev[i+\",cumsum(miss.patterns.nobs)[i-1],\",k] <- 2*D[i+\",cumsum(miss.patterns.nobs)[i-1],\",k]*(log(D[i+\",cumsum(miss.patterns.nobs)[i-1],\",k])-log(Dhat[i+\",cumsum(miss.patterns.nobs)[i-1],\",k]))\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\t\t}\n\t\tcat(\"}\\n}\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t}\n\tncov <- length(covs)\n\tif (model == \"baseline\") {\n\t\tcat(\"\n\tfor (i in 1:narm) {\n\t\tfor (k in 1:ncat) {\n\t\t\tP[i,k] <- theta[i,k]/sum(theta[i,]) #baseline category \n\t\t\tlogit(theta[i,k]) <- Eta[study[i],k] + Delta[i,k] * (1 - equals(treat[i],base.treat[i]))\n\t\t}\n\t\tDelta[i,1] <- 0\\n\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t\tif (ncat > 2) {\n\t\t\tif (var.model == \"ind\" | var.model == \"consis\") {\n\t\t\t\tcat(\"\n\t\t\t\tDelta[i,2:ncat] ~ dmnorm(Mu.Delta[i,], Prec[base.treat[i],treat[i],,])\\n\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t\t\t}\n\t\t\telse if (var.model == \"hom\") {\n\t\t\t\tcat(\"\n\t\t\t\tDelta[i,2:ncat] ~ dmnorm(Mu.Delta[i,], 
Prec[,])\\n\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t\t\t}\n\t\t}\n\t\telse if (ncat == 2) {\n\t\t\tif (var.model == \"ind\" | var.model == \"consis\") {\n\t\t\t\tcat(\"\n\t\t\t\tDelta[i,2] ~ dnorm(Mu.Delta[i], Prec[base.treat[i],treat[i]])\\n\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t\t\t}\n\t\t\telse if (var.model == \"hom\") {\n\t\t\t\tcat(\"\n\t\t\t\tDelta[i,2] ~ dnorm(Mu.Delta[i], Prec)\\n\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t\t\t}\n\t\t}\n\t\t\n\t}\n\telse if (model == \"logit\") {\n\t\tcat(\"\n\tfor (i in 1:narm) {\n\t\tfor (k in 1:(ncat - 1)) {\n\t\t\tlogit(P[i,k+1]) <- theta[i,k]\n\t\t\ttheta[i,k] <- Eta[study[i],k] + Delta[i,k] * (1 - equals(treat[i],base.treat[i]))\n\t\t}\\n\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t\tif (ncat > 2) {\n\t\t\tif (var.model == \"ind\" | var.model == \"consis\") {\n\t\t\t\tcat(\"\n\t\t\t\tDelta[i,1:(ncat-1)] ~ dmnorm(Mu.Delta[i,], Prec[base.treat[i],treat[i],,])\\n\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t\t\t}\n\t\t\telse if (var.model == \"hom\") {\n\t\t\t\tcat(\"\n\t\t\t\tDelta[i,1:(ncat-1)] ~ dmnorm(Mu.Delta[i,], Prec[,])\\n\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t\t\t}\n\t\t}\n\t\telse if (ncat == 2) {\n\t\t\tif (var.model == \"ind\" | var.model == \"consis\") {\n\t\t\t\tcat(\"\n\t\t\t\tDelta[i,1] ~ dnorm(Mu.Delta[i], Prec[base.treat[i],treat[i]])\\n\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t\t\t}\n\t\t\telse if (var.model == \"hom\") {\n\t\t\t\tcat(\"\n\t\t\t\tDelta[i,1] ~ dnorm(Mu.Delta[i], Prec)\\n\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t\t\t}\n\t\t}\n\t}\n\tif (ncat > 2) {\n\t\tbase.string <- \" \t\t Mu.Delta[i,j] <- d[treat[i],j] - d[base.treat[i],j]\"\n\t\tcomment <- \" #Mu.Delta has dim [narm x (ncat-1)]\"\n\t}\n\telse if (ncat == 2) {\n\t\tbase.string <- \" \t\t Mu.Delta[i] <- d[treat[i],j] - d[base.treat[i],j]\"\n\t\tcomment <- \" #Mu.Delta has dim [narm x (ncat-1)]\"\n\t}\n\tcat(\"\n\t\tfor (j in 1:(ncat-1)){ \\n\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t\t\tif (is.null(covs)) {\n\t\t\t\tcat(\n\t\t\t\tbase.string,comment,file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t\t\t}\t\n\t\t\telse {\n\t\t\t\tbeta1.string <- base.string\n\t\t\t\tfor (k in 1:ncov)\n\t\t\t\t\tbeta1.string <- paste(beta1.string, \" + Beta\",k,\"[i,j]*x[i,\",k,\"]\",sep=\"\")\n\t\t\t\tcat(\n\t\t\t beta1.string,comment,file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t\t\t for (k in 1:ncov) {\n\t\t\t \tbeta2.string <- paste(\"\\n Beta\",k,\"[i,j] <- slope[treat[i],\",k,\",j] - slope[base.treat[i],\",k,\",j]\",sep=\"\")\n\t\t\t \tcat(beta2.string, file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t\t\t }\n\t\t\t}\n\t\t\tcat(\"\n \t}\\n\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t\tif (model == \"baseline\") {\n\t\tcat(\"\n\t}\n\tfor (i in 1:nstudy) {\n\t\tEta[i, 2:ncat] ~ \", prior$Prior.Eta, \"\n\t\tEta[i, 1] <- 0\n\t}\\n\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t\t}\n\t\telse if (model == \"logit\") {\n\t\t\tcat(\"\n\t\t\tP[i,1] <- 1 - sum(P[i,2:ncat])\n\t}\n\tfor (i in 1:nstudy) {\n\t\tEta[i,1:(ncat-1)] ~ \", prior$Prior.Eta, \"\n\t}\\n\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t\t}\n\t\tcat(\"\n\tfor (k in 1:(ncat-1)) {\n\t\td[1,k] <- 0\\n\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t\tif (!is.null(covs)) {\n\t\tcat(\"\n\t\tfor (j in 1:ncov) 
{\n\t\t slope[1,j,k] <- 0\n\t\t}\\n\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t\t}\n\t\tcat(\"\n\t}\n\tfor (j in 2:ntreat) {\n\t\td[j,1:(ncat-1)] ~ \", prior$Prior.d, \"#d has dim [ntreat x (ncat-1)]\", file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t\tif (!is.null(covs)) {\n\t\tcat(\"\n\t\tfor (k in 1:ncov) {\n\t\t\tslope[j,k,1:(ncat-1)] ~\", prior$Prior.slope, \n\t\t\"}\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t\t}\n\t\tcat(\"\n\t}\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\tif (var.model == \"ind\") multi.var.ind(ntreat,ncat,prior,path)\n\telse if (var.model == \"hom\") multi.var.hom(ntreat,ncat,prior,varprior,path)\n\telse if (var.model == \"consis\") multi.var.consis(ntreat,ncat,prior,path)\n# odds.ratio <- exp(d)\n# pred.LOR ~ dmnorm(d, Prec)\n}\n\n\nmulti.model.completedata <-\nfunction (data,narm,ncat,path) \n{\ncat(\"\\n for (i in 1:\", narm,\") {\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\ncat(\" D[i,1:\", ncat, \"] ~ dmulti(P[i,],N[i])\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\ncat(\" N[i] <- sum(D[i,1:\", ncat, \"])\\n\t}\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\n}\n\n\nmulti.model.missingdata <-\nfunction (miss.patterns, ncat,path) \n{\n\tif (length(miss.patterns[[1]])==1) complete = T\n\tmiss.patterns.nobs <- miss.patterns.nprobs <- integer(length(miss.patterns[[1]]))\n\tfor (i in seq(length(miss.patterns[[1]]))) {\n\t\tmiss.patterns.nprobs[i] <- length(miss.patterns[[1]][[i]][[2]]) #number of parameter combinations in ith pattern\n\t\tmiss.patterns.nobs[i] <- length(miss.patterns[[1]][[i]][[1]]) # number of studies in ith pattern\n\t}\n \n\tfor (i in seq(length(miss.patterns[[1]]))) {\n\t\tcat(\"\\n for (i in 1:\", miss.patterns.nobs[i],\") {\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\tif (i==1) {\n\t\t\tcat(\" D[i,1:\", miss.patterns.nprobs[i], \"] ~ dmulti(P\",i,\"[i,],N\",i,\"[i])\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\tcat(\" N\",i,\"[i] <- sum(D[i,1:\", miss.patterns.nprobs[i], \"])\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t}\n\t\telse {\n\t\t\tcat(\" D[i+\", cumsum(miss.patterns.nobs)[i-1], \",1:\", miss.patterns.nprobs[i], \"] ~ dmulti(P\",i,\"[i,],N\",i,\"[i])\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\tcat(\" N\",i,\"[i] <- sum(D[i+\", cumsum(miss.patterns.nobs)[i-1], \",1:\", miss.patterns.nprobs[i], \"])\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t}\n\t\tfor (j in seq(miss.patterns.nprobs[i]-1)) {\n\t\t\tcombo <- miss.patterns[[2]][miss.patterns[[1]][[i]][[2]][j],]\n\t\t\tpos <- seq(ncat)[as.logical(abs(combo))]\n\t\t\tsigns <- ifelse(combo[pos]==1,\"+\",\"-\") \n\t\t\tif (i==1) {\n\t\t\t\tif (length(pos)>1) {\n\t\t\t\t\tstr <- paste(signs[-1],\"P[i,\",pos[-1],\"]\",sep=\"\")\n\t\t\t\t\tif (complete) \n\t\t\t\t\t\tcat(\" P\",i,\"[i,\", j, \"] <- (P[i,\",pos[1],\"]\",str,\")\\n\",file = paste(path,\"model.txt\",sep=\"\"), append = T, sep=\"\")\n\t\t\t\t\telse\n\t\t\t\t\t\tcat(\" P\",i,\"[i,\", j, \"] <- (P[i,\",pos[1],\"]\",str,\")*corr.factor[i]\\n\",file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t\tif (complete)\n\t\t\t\t\t\tcat(\" P\",i,\"[i,\", j, \"] <- P[i,\",pos[1],\"]\\n\",file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\t\t\telse\n\t\t\t\t\t\tcat(\" P\",i,\"[i,\", 
j, \"] <- P[i,\",pos[1],\"]*corr.factor[i]\\n\",file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\t}\n\t\t\telse {\n\t\t\t\tif (length(pos)>1) {\n\t\t\t\t\tstr <- paste(signs[-1],\"P[i+\", cumsum(miss.patterns.nobs)[i-1],\",\",pos[-1],\"]\",sep=\"\")\n\t\t\t\t\tif (complete)\n\t\t\t\t\t\tcat(\" P\",i,\"[i,\", j, \"] <- (P[i+\", cumsum(miss.patterns.nobs)[i-1],\",\",pos[1],\"]\",str,\")\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\t\t\telse\n\t\t\t\t\t\tcat(\" P\",i,\"[i,\", j, \"] <- (P[i+\", cumsum(miss.patterns.nobs)[i-1],\",\",pos[1],\"]\",str,\")*corr.factor[i+\",cumsum(miss.patterns.nobs)[i-1],\"]\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t\tif (complete)\n\t\t\t\t\t\tcat(\" P\",i,\"[i,\", j, \"] <- (P[i+\", cumsum(miss.patterns.nobs)[i-1],\",\",pos[1],\"])\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\t\t\telse\n\t\t\t\t\t\tcat(\" P\",i,\"[i,\", j, \"] <- (P[i+\", cumsum(miss.patterns.nobs)[i-1],\",\",pos[1],\"])*corr.factor[i+\",cumsum(miss.patterns.nobs)[i-1],\"]\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\t} \n\t\t}\n\t\tcat(\" P\",i,\"[i,\", miss.patterns.nprobs[i],\"] <- 1 - sum(P\",i,\"[i,1:\",miss.patterns.nprobs[i]-1,\"])\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\tcat(\"for (k in 1:\", miss.patterns.nprobs[i],\") {\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\t\tif (i==1) {\n\t\t\t\t\tcat(\"Dhat[i,k] <- P\",i,\"[i,k]*N\",i,\"[i]\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\t\t\tcat(\"dev[i,k] <- 2*D[i,k]*(log(D[i,k])-log(Dhat[i,k]))\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tcat(\"Dhat[i+\", cumsum(miss.patterns.nobs)[i-1],\",k] <- P\",i,\"[i,k]*N\",i,\"[i]\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\t\t\tcat(\"dev[i+\",cumsum(miss.patterns.nobs)[i-1],\",k] <- 2*D[i+\",cumsum(miss.patterns.nobs)[i-1],\",k]*(log(D[i+\",cumsum(miss.patterns.nobs)[i-1],\",k])-log(Dhat[i+\",cumsum(miss.patterns.nobs)[i-1],\",k]))\\n\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t\t\t\t}\n\t\tcat(\"}\\n}\", file = paste(path,\"model.txt\",sep=\"\"), append = T,sep=\"\")\n\t}\n}\n\n\nmulti.var.hom <- function(ntreat,ncat,prior,varprior,path)\n{\n\tif (ncat > 2) {\n\t\tcat(\"\n\t\tPrec[1:(ncat-1),1:(ncat-1)] ~ \",prior$Prior.Prec,\"\n\t\tSigma[1:(ncat-1),1:(ncat-1)] <- inverse(Prec[,])\n\t\t}\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t}\n\telse {\n\t\tif (varprior[[1]]==\"prec\") {\n\t\t\tcat(\"\n\t\t\tPrec ~ \",prior$Prior.Prec,\"\n\t\t\tSigma <- 1/Prec\n\t\t\t}\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t\t}\n\t\telse if (varprior[[1]] == \"sd\") {\n\t\t\tcat(\"\n\t\t\tSd ~ \",prior$Prior.Prec,\"\n\t\t\tPrec <- 1/pow(Sd,2)\n\t\t\t}\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t\t}\n\t}\n}\n\nmulti.var.ind <- function (ntreat,ncat,prior,path)\n{\n# Prec has dim [(ntreat-1) x ntreat x (ncat-1) x (ncat-1)]\n\tif (ncat > 2) {\n\t\tcat(\"\n\t\tfor (i in 1:(ntreat-1)) {\n\t\t\tfor (j in 1:(ncat-1)) {\n\t\t\t\tPrec[i,i,j,j] <- 1\n\t\t\t\tSigma[i,i,j,j] <- 1\n\t\t\t\tfor (k in 1:(j-1)) {\n\t\t\t\t\tPrec[i,i,j,k] <- 0\n\t\t\t\t\tSigma[i,i,j,k] <- 0\n\t\t\t\t}\n\t\t\t\tfor (k in (j+1):(ncat-1)) {\n\t\t\t\t\tPrec[i,i,j,k] <- 0\n\t\t\t\t\tSigma[i,i,j,k] <- 
0\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor (j in (i+1):ntreat) {\n\t\t\t\tPrec[i,j,1:(ncat-1),1:(ncat-1)] ~\",prior$Prior.Prec,\"\n\t\t\t\tSigma[i,j, 1:(ncat-1),1:(ncat-1)] <- inverse(Prec[i,j,,])\n\t\t\t}\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t\t}\n\telse {\n\t\tcat(\"\n\t\tfor (i in 1:(ntreat-1)) {\n\t\t\tSigma[i,i] <- 1\n\t\t\tPrec[i,i] <- 1\n\t\t\tfor (j in (i+1):ntreat){\n\t\t\t\tPrec[i,j] ~\", prior$Prior.Prec,\"\n\t\t\t\tSigma[i,j] <- 1/Prec[i,j]\n\t\t\t}\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n }\ncat(\"\n\t}\n}\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\n}\n\nmulti.var.consis <- function(ntreat,ncat,prior,path)\n{\n\tif (ncat > 2) {\n\t\tcat(\"\n\t\tfor(i in 1:(ntreat-1)){\n\t\t\tfor (j in 1:(ncat-1)) {\n\t\t\t\tPrec[i,i,j,j] <- 1\n\t\t\t\tSigma[i,i,j,j] <- 1\n\t\t\t\tfor (k in 1:(j-1)) {\n\t\t\t\t\tPrec[i,i,j,k] <- 0\n\t\t\t\t\tSigma[i,i,j,k] <- 0\n\t\t\t\t}\n\t\t\t\tfor (k in (j+1):(ncat-1)) {\n\t\t\t\t\tPrec[i,i,j,k] <- 0\n\t\t\t\t\tSigma[i,i,j,k] <- 0\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor (j in (i+1):ntreat) {\n\t\t\t\tSigma[j,i,1:(ncat-1),1:(ncat-1)] <- Sigma[i,j,1:(ncat-1),1:(ncat-1)]\n\t\t\t\tPrec[i,j, 1:(ncat-1),1:(ncat-1)] <- inverse(Sigma[i,j,,])\n\t\t\t\tPrec[j,i,1:(ncat-1),1:(ncat-1)] <- Prec[i,j,1:(ncat-1),1:(ncat-1)]\n\t\t\t}\n\t\t}\n\t\tfor (k in 1:(ncat-1)) {\n\t\t\tfor(i in 1:ntreat){\n\t\t\t\ttau.omega[i,k] ~\", prior$Prior.Prec,\" #dgamma(0.001,0.001)\n\t\t\t\tv.omega[i,k] <- 1/tau.omega[i,k]\n\t\t\t\tsd.omega[i,k] <- sqrt(v.omega[i,k]) \n\t\t\t}\n\t\t\tpi.half <- 1.5708\t\n\t\t\tfor(i in 1:(ntreat-1)){\n\t\t\t\tfor(j in (i+1):ntreat){\n\t\t\t\t\tL[j,i,k] <- 0\n\t\t\t\t\tphi[i,j,k] ~ dunif(0, pi.half)\n\t\t\t\t\trho[i,j,k] <- inprod(L[, i,k],L[, j,k])\n\t\t\t\t\tSigma[i,j,k,k] <- v.omega[i,k]+v.omega[j,k]-2*rho[i,j,k]*sd.omega[i,k]*sd.omega[j,k] \n\t\t\t\t}\n\t\t\t}\n\t\t\tL[1,1,k] <- 1\n\t\t\tfor (i in 2:ntreat) {\n\t\t\t\tL[1,i,k] <- cos(phi[1,i,k])\n\t\t\t\tL[i,i,k] <- 1\n\t\t\t\tfor (j in 1:(i-1)) {\n\t\t\t\t\tL[i,i,k] <- L[i,i,k]*sin(phi[j,i,k])\n\t\t\t\t}\n\t\t\t\tfor (j in (i+1):ntreat) {\n\t\t\t\t\tL[i,j,k] <- cos(phi[i,j,k])\n\t\t\t\t\tfor (m in 1:(i-1)) {\n\t\t\t\t\t\tL[i,j,k] <- L[i,j,k] * sin(phi[m,j,k])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\t\n\t\t}\t\n\t\t\t\n\t\t\t\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t}\n\telse {\n\t\tcat(\"\n\t\tfor(i in 1:(ntreat-1){\n\t\t\tPrec[i,i] <- 1\n\t\t\tSigma[i,i] <- 1\n\t\t\tfor(j in (i+1):ntreat){\n\t\t\t\tSigma[j,i] <- Sigma[i,j]\n\t\t\t\tPrec[i,j] <- 1/Sigma[i,j]\n\t\t\t\tPrec[j,i] <- Prec[i,j] \n\t\t\t}\n\t\t}\n\t\tfor(i in 1:ntreat){\n\t\t\ttau.omega[i] ~\", prior$Prior.Prec,\" #dgamma(0.001,0.001)\n\t\t\tv.omega[i] <- 1/tau.omega[i]\n\t\t\tsd.omega[i] <- sqrt(v.omega[i]) \n\t\t}\n\t\tpi.half <- 1.5708\n\t\tfor(i in 1:(ntreat-1)){\n\t\t\tfor(j in (i+1):(ntreat){\n\t\t\t\tL[j,i] <- 0\n\t\t\t\tphi[i,j] ~ dunif(0, pi.half)\n\t\t\t\trho[i,j] <- inprod(L[, i],L[, j])\n\t\t\t\tSigma[i,j] <- v.omega[i]+v.omega[j]-2*rho[i,j]*sd.omega[i]*sd.omega[j] \n\t\t\t}\n\t\t}\n\t\tL[1,1] <- 1\n\t\tfor (i in 2:ntreat) {\n\t\t\tL[1,i] <- cos(phi[1,i])\n\t\t\tL[i,i] <- 1\n\t\t\tfor (j in 1:(i-1)) {\n\t\t\t\tL[i,i] <- L[i,i]*sin(phi[j,i])\n\t\t\t}\n\t\t\tfor (j in (i+1):ntreat) {\n\t\t\t\tL[i,j] <- cos(phi[i,j])\n\t\t\t\tfor (k in 1:(i-1)) {\n\t\t\t\t\tL[i,j] <- L[i,j] * 
sin(phi[k,j])\n\t\t\t\t}\n\t\t\t}\n\t\t}\t\n\t\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n\t}\n\tcat(\"\n}\",file=paste(path,\"model.txt\",sep=\"\"),append=T,sep=\"\")\n}\n\n\nmulti.bugs.fit <-\nfunction(model.file,data.file,numChains,inits.files,path,pars.to.save,setsize,conv.limit,niters,nruns,covs)\n{\nmodelCheck(model.file) # check model file\nmodelData(\"data.txt\") # read data file\nmodelCompile(numChains=3) # compile model with 3 chains\nmodelInits(inits.files[1]) # read init data file\nmodelInits(inits.files[2]) # read init data file\nmodelInits(inits.files[3]) # read init data file\n\tsamplesSet(pars.to.save) # parameters to monitor\n i <- 0\n max.bgrRatio <- 10\n while (i < floor(niters/setsize) & max.bgrRatio > conv.limit) {\t\n\t\tmodelUpdate(setsize) # setsize number of iterations\n\t\ti <- i + 1\n\t\td.bgr <- matrix(unlist(samplesBgr(\"d\",1,setsize*i,plot=F,bins=2)),ncol=8,byrow=T)[,8]\n \tEta.bgr <- matrix(unlist(samplesBgr(\"Eta\",1,setsize*i,plot=F,bins=2)),ncol=8,byrow=T)[,8]\n\t\tif(!is.null(covs)) slope.bgr <- matrix(unlist(samplesBgr(\"slope\",1,setsize*i,plot=F,bins=2)),ncol=8,byrow=T)[,8]\n\t\tif(is.null(covs)) \n\t\t\tmax.bgrRatio <- max(c(d.bgr,Eta.bgr))\n\t\telse \n\t\t\tmax.bgrRatio <- max(c(d.bgr,slope.bgr,Eta.bgr))\n\t\tprint(max.bgrRatio)\n \tprint(d.bgr)\n \tif (!is.null(covs)) print(slope.bgr)\n\t\tprint(Eta.bgr)\n#\t\tif(max(current.bgrRatio) <= conv.limit) {\n#\t\t\tno.to.converge <- i*setsize\n#\t\t\tbreak()\n#\t\t}\n\t}\n\tno.to.converge <- i*setsize\n\tno.to.keep <- no.to.converge/2.0 # 2nd half of converged sequence is good\n\tif (nruns > no.to.keep) {\n\t samplesClear(\"*\")\n\t samplesSet(pars.to.save)\n#\t dicSet()\n\t modelUpdate(nruns)\n\t }\n\telse {\n\t thin <- floor(no.to.keep/nruns)\n\t samplesClear(\"*\")\n\t samplesSet(pars.to.save)\n#\t dicSet()\n\t modelUpdate(no.to.keep,thin=thin)\n\t}\n\tif (is.null(covs))\n\t\tparams <- c(samplesMonitors(\"d\"),samplesMonitors(\"Prec\"),samplesMonitors(\"P\"))\n\telse\n\t\tparams <- c(samplesMonitors(\"d\"),samplesMonitors(\"slope\"),samplesMonitors(\"Prec\"),samplesMonitors(\"P\")) \n\tsamples <- sapply(params, samplesSample)\n\tsamples.array <- array(samples, c(nrow(samples)/numChains, numChains, ncol(samples)))\n\tdimnames(samples.array)[[3]] <- dimnames(samples)[[2]]\n\tmat<-samplesStats(\"*\",beg=2+no.to.keep,end=modelIteration()) # summarized results\n#\tdic<-dicStats()\n#\tout<-list(no.to.converge,dim(samples.array)[1],mat,dic,samples.array)\n#\tnames(out) <- c(\"BurnIn\",\"No. Runs Per Chain\", \"Stats\",\"DIC\",\"Samples\")\n\tout<-list(no.to.converge,dim(samples.array)[1],mat,samples.array)\n\tnames(out) <- c(\"BurnIn\",\"No. Runs Per Chain\", \"Stats\",\"Samples\")\n\treturn(out)\n}\n\nrank.tx = function(x)\n{\n# Produces treatment ranking tables for each outcome category. Row is the rank and column is the treatment. 
\n# Entry gives the probability that column treatment has row rank\n# x is output from model for d #sims x(ntreat-1)x(ncat-1)\n\tncat = dim(x)[3] + 1\n\tntreat = dim(x)[2] + 1\n\tnsim = dim(x)[1]\n\trank.d = d.expand = array(NA,c(0,1,0) + dim(x))\n\td.expand[,1,] = 0\n\td.expand[,2:dim(d.expand)[2],] = x\n\trank.table = array(NA,c(ntreat,ntreat,ncat-1))\n\tfor (i in seq(nsim))\n\t\trank.d[i,,] = apply(d.expand[i,,],2,rank)\n\tfor (i in seq(ncat-1))\n\t\tfor (j in seq(ntreat))\n\t\t\trank.table[,j,i] = table(rank.d[,j,i])/nsim\n\tdimnames(rank.table) = list(NULL,paste(\"Treatment\",seq(ntreat),sep=\" \"),paste(\"Category\",seq(ncat-1),sep=\" \"))\n\treturn(rank.table)\n}\n\nplot.rank.tx = function(rank.table,catnames,txnames)\n{\n#Plots probability that each treatment is in specific rank\n#rank.table is ranking table produced by function rank.tx\n#catnames are names of outcome categories\n#txnames are names of treatments\n\tntreat = dim(rank.table)[1]\n\tncat = dim(rank.table)[3]\n\tif (missing(catnames)) catnames = paste(\"Outcome Category\",seq(ncat),sep=\" \")\n\tif (missing(txnames)) txnames = paste(\"Treatment\",seq(ncat),sep=\" \")\n\tncol = floor(sqrt(ncat))\n\tnrow = ceiling(ncat/ncol)\n\toldpar = par(no.readonly=T)\n\tpar(mfrow = c(nrow, ncol))\n\tfor (i in seq(ncat)) {\n\t\tplot(seq(ntreat),seq(ntreat),type=\"n\",xaxt=\"n\",ylim=c(0,1),yaxt=\"n\",ylab=\"Probability\",xlab=\"Rank\")\n\t\taxis(side=1,at=seq(ntreat))\n\t\taxis(side=2,at=seq(0,1,by=0.2))\n\t\ttitle(catnames[i])\n\t\tfor (j in seq(ntreat))\n\t\t\tpoints(seq(ntreat), rank.table[,j,i],type=\"l\",lty=j,col=j)\n\t\tlegend(1,1,txnames,lty=1:4,bty=\"n\",cex=.75)\n\t}\n\tpar(oldpar,no.readonly=T)\n}\n\nplot.cumrank.tx = function(rank.table,catnames,txnames)\n{\n#Plots cumulative probability of ranks for each treatment\n#rank.table is ranking table produced by function rank.tx\n#catnames are names of outcome categories\n#txnames are names of treatments\n\tntreat = dim(rank.table)[1]\n\tncat = dim(rank.table)[3]\n\tif (missing(catnames)) catnames = paste(\"Outcome Category\",seq(ncat),sep=\" \")\n\tif (missing(txnames)) txnames = paste(\"Treatment\",seq(ncat),sep=\" \")\n\tncol = floor(sqrt(ncat))\n\tnrow = ceiling(ncat/ncol)\n\toldpar = par(no.readonly=T)\n\tpar(mfrow = c(nrow, ncol))\n\tfor (i in seq(ncat)) {\n\t\tx = apply(rank.table[,,i],2,cumsum)\n\t\tplot(seq(ntreat),seq(ntreat),type=\"n\",xaxt=\"n\",ylim=c(0,1),yaxt=\"n\",ylab=\"Cumulative Probability\",xlab=\"Rank\")\n\t\taxis(side=1,at=seq(ntreat))\n\t\taxis(side=2,at=seq(0,1,by=0.2))\n\t\ttitle(catnames[i])\n\t\tfor (j in seq(ntreat))\n\t\t\tpoints(seq(ntreat), x[,j],type=\"l\",lty=j,col=j)\n\t\tlegend(1,1,txnames,lty=1:4,bty=\"n\",cex=.75)\n\t}\n\tpar(oldpar,no.readonly=T)\n}\n\nsucra = function(rank.table,catnames,txnames)\n{\n#mulit.\n#rank.table is ranking table produced by function rank.tx\n#catnames are names of outcome categories\n#txnames are names of treatments\n\tncat = dim(rank.table)[3]\n\tntreat = dim(rank.table)[1]\n\tif (missing(catnames)) catnames = paste(\"Outcome Category\",seq(ncat),sep=\" \")\n\tif (missing(txnames)) txnames = paste(\"Treatment\",seq(ncat),sep=\" \")\n\tx = array(NA,dim(rank.table)[2:3])\n\tfor (i in seq(ncat))\n\t\tx[,i] = apply(apply(rank.table[-ntreat,,i],2,cumsum),2,sum)/(ntreat-1)\n\tdimnames(x) = list(txnames,catnames)\n\treturn(x)\n}\n\nresid.diag = function(mod)\n# Graph leverage vs. 
residual deviance with curves of fit\n# Then returns summary statistics\n# mod model for which to compute diagnostics\n{\n\tNsize = mod$N # study sample sizes\n\tncat = mod$No.Categories # No. outcome categories\n\tmiss.patterns = mod$Missing.Data.Patterns # missing data patterns\n\tData = mod$Data #data frame with all outcomes\n\tpost.dev = mod$Stats[grep(\"dev\",dimnames(mod$Stats)[[1]]),1] #mean posterior residual deviance (#trials x #arms)\n\tx1 = grep(\"P\",dimnames(mod$Stats)[[1]])\n\tx2 = grep(\"Prec\",dimnames(mod$Stats)[[1]])\n\tx1[!x1%in%x2]\n\tP = matrix(mod$Stats[x1[!x1%in%x2],1],ncol=ncat,byrow=T) # posterior means of p[i,k]\n\tx <- seq(-3, 3, 0.1)\n\ty1 <- 1 - x^2\n\ty2 <- 2 - x^2\n\ty3 <- 3 - x^2\n# Convert complete posterior probabilities to incomplete ones\n\tmiss.mat = miss.patterns[[2]] #association of data variables to complete outcomes\n\tN = D = post.prob = rep(NA,length(post.dev))\n\tcount = 0\n\tnpatterns = length(miss.patterns[[1]])\n\tfor (i in seq(npatterns)) {\n\t\tvars = miss.patterns[[1]][[i]][[2]] # variables in ith missing data pattern\n\t\tarms = miss.patterns[[1]][[i]][[1]] # arms in ith missing data pattern\n\t\tD[count+seq(length(vars)*length(arms))] = t(Data[miss.patterns[[1]][[i]][[1]],miss.patterns[[1]][[i]][[2]]])\n\t\tN[count+seq(length(vars)*length(arms))] = rep(Nsize[arms],each=length(vars))\n\t\tpost.prob[count+seq(length(vars)*length(arms))] = as.vector(miss.mat[vars,]%*%t(P[arms,]))\n\t\tcount = count + length(vars)*length(arms)\t\t\n\t}\n\tD.post = 2*D*(log(D)-log(post.prob*N))\n\tpD = post.dev - D.post\n\tcount = 0\n\tpD.arm = post.dev.arm = rep(NA,dim(Data)[1])\n\tfor (i in seq(npatterns)) {\n\t\tarms = miss.patterns[[1]][[i]][[1]]\n\t\tnarms = length(arms)\n\t\tnvars = length(miss.patterns[[1]][[i]][[2]])\n\t\tfor (j in seq(narms)) {\n\t\t\tpost.dev.arm[arms[1]-1+j] = sum(post.dev[count+seq(nvars)])\n\t\t\tpD.arm[arms[1]-1+j] = sum(pD[count+seq(nvars)])\n\t\t\tcount = count + nvars\n\t\t}\n\t}\n\tplot(sign(post.dev.arm)*sqrt(abs(post.dev.arm)), pD.arm, pch = 1, main = \"Fit of the model\", xlab = \"Residual Deviance (postDi)\", ylab = \"Leverage (pDi)\")\n matlines(x, cbind(y1, y2, y3))\n resid.dev = sum(post.dev)\n\teff.no.params = sum(pD)\n\tDIC = resid.dev + eff.no.params\n\tout = list(round(post.dev,2), round(pD,2), post.dev.arm, pD.arm,resid.dev,length(D),eff.no.params,DIC)\n\tnames(out) = c(\"Residual deviances\",\"Leverages\",\"Arm Deviances\",\"Arm Leverages\",\"Total Residual Deviance\",\"Number of data points\",\"Effective number of parameters\",\"DIC\")\n\treturn(out)\n}\n\n\nmake.tx.comparison.table <-\nfunction (x) \n{\n# For each outcome category, produces summary statistics comparing treatment i to j. 
First row gives means for # each treatment.\n# x is coda output for d #sims x(ntreat-1)x(ncat-1)\nncat<-dim(x)[3] + 1\nntreat <- dim(x)[2] + 1\nmean.x <- sd.x <- median.x <- p025.x <- p975.x <- array(NA,c(ntreat,ntreat,ncat-1))\ndimnames(mean.x) = list(paste(\"Tx \",seq(ntreat)),paste(\"Tx \",seq(ntreat)),paste(\"Category \",seq(ncat-1)))\ndimnames(sd.x) = dimnames(median.x) = dimnames(p025.x) = dimnames(p975.x) = dimnames(mean.x)\nfor (k in 1:(ncat-1)) {\n\tfor (i in 2:ntreat) {\n\t\tmean.x[1,i,k] <- mean(x[,i-1,k],na.rm=T)\n\t\tsd.x[1,i,k] <- sd(x[,i-1,k],na.rm=T)\n\t\tmedian.x[1,i,k] <- median(x[,i-1,k],na.rm=T)\n\t\tp025.x[1,i,k] <- quantile(x[,i-1,k],.025,na.rm=T)\n\t\tp975.x[1,i,k] <- quantile(x[,i-1,k],.975,na.rm=T)\n\t\tfor (j in i:ntreat) {\n\t\t\tif (j > i) {\n\t\t\t\tmean.x[i,j,k] <- mean(x[,j-1,k]-x[,i-1,k],na.rm=T)\n\t\t\t\tsd.x[i,j,k] <- sd(x[,j-1,k]-x[,i-1,k],na.rm=T)\n\t\t\t\tmedian.x[i,j,k] <- median(x[,j-1,k]-x[,i-1,k],na.rm=T)\n\t\t\t\tp025.x[i,j,k] <- quantile(x[,j-1,k]-x[,i-1,k],.025,na.rm=T)\n\t\t\t\tp975.x[i,j,k] <- quantile(x[,j-1,k]-x[,i-1,k],.975,na.rm=T)\n\t\t\t}\n\t\t}\n\t}\n}\nout<-list(mean.x,sd.x,exp(median.x),exp(p025.x),exp(p975.x))\nnames(out)<-c(\"Mean\",\"SD\",\"Median\",\"P025\",\"P975\")\nreturn(out)\n}\n\n\nmulti.stats <-\nfunction(x) {\nlast.dim <- length(dim(x))\nmargins<-seq(2,last.dim)\nmean.x<-apply(x,margins,mean,na.rm=T)\nmedian.x<-apply(x,margins,median,na.rm=T)\nsd.x<-apply(x,margins,sd,na.rm=T)\np025.x<-apply(x,margins,quantile,.025,na.rm=T)\np975.x<-apply(x,margins,quantile,.975,na.rm=T)\nout<-list(mean.x,sd.x,median.x,p025.x,p975.x)\nnames(out)<-c(\"Mean\",\"SD\",\"Median\",\"P025\",\"P975\")\nreturn(out)\n}\n\n\nsort.vec <-\nfunction(x)\n{\n# Re-sorts vector x in order starting with one\n\told.x <- x\n sorted.x <- sort(unique(old.x))\n nx <- length(old.x)\n x <- rep(NA, nx)\n for (i in 1:nx) x[old.x == sorted.x[i]] <- i #relabel studies in numerical order starting with one\n return(x)\n}\n\ninvlogit <-\nfunction(x){exp(x)/(1+exp(x))}\n\n\nlogit <-\nfunction(p){log(p/(1-p))}\n\nunit.test <-\nfunction () \n{\n\nrun.multi(lipids.data[1:8,], c(8,10), ,lipids.data[1:8,11], conv.limit=1.2,nruns=5000,model=\"baseline\",path=\"e:/multiple treatments/multinomial/\")\ncat(\"2\\n\\n\\n\\n\")\nrun.multi(lipids.data[1:34,],c(8,10), ,lipids.data[1:34,11],conv.limit=1.2,nruns=5000,model=\"baseline\",path=\"e:/multiple treatments/multinomial/\")\ncat(\"3\\n\\n\\n\\n\")\nrun.multi(lipids.data[1:8,], 5:10,,lipids.data[1:8,14], conv.limit=1.2,nruns=5000,model=\"baseline\",path=\"e:/multiple treatments/multinomial/\")\ncat(\"4\\n\\n\\n\\n\")\nrun.multi(lipids.data[1:18,],5:10,,lipids.data[1:18,14],conv.limit=1.2,nruns=5000,model=\"baseline\",path=\"e:/multiple treatments/multinomial/\")\ncat(\"5\\n\\n\\n\\n\")\nrun.multi(lipids.data[c(1:8,19:22),],5:14,misspatterns2.multi,lipids.data[c(1:8,19:22),14],conv.limit=1.2,nruns=5000,model=\"baseline\",path=\"e:/multiple treatments/multinomial/\")\ncat(\"6\\n\\n\\n\\n\")\nrun.multi(lipids.data,5:14,misspatterns.multi,lipids.data[,14],conv.limit=1.2,nruns=5000,model=\"baseline\",path=\"e:/multiple treatments/multinomial/\")\ncat(\"7\\n\\n\\n\\n\")\nrun.multi(lipids.data[1:8,], c(8,10), ,lipids.data[1:8,11], conv.limit=1.2,nruns=5000,model=\"logit\",path=\"e:/multiple treatments/multinomial/\")\ncat(\"8\\n\\n\\n\\n\")\nrun.multi(lipids.data[1:34,],c(8,10), ,lipids.data[1:34,11],conv.limit=1.2,nruns=5000,model=\"logit\",path=\"e:/multiple 
treatments/multinomial/\")\ncat(\"9\\n\\n\\n\\n\")\nrun.multi(lipids.data[1:8,],5:10,,lipids.data[1:8,14],conv.limit=1.2,nruns=5000,model=\"logit\",path=\"e:/multiple treatments/multinomial/\")\ncat(\"10\\n\\n\\n\\n\")\nrun.multi(lipids.data[1:18,],5:10,,lipids.data[1:18,14],conv.limit=1.2,nruns=5000,model=\"logit\",path=\"e:/multiple treatments/multinomial/\")\ncat(\"11\\n\\n\\n\\n\")\nrun.multi(lipids.data[c(1:8,19:22),],5:14,misspatterns2.multi,lipids.data[c(1:8,19:22),14],conv.limit=1.2,nruns=5000,model=\"logit\",path=\"e:/multiple treatments/multinomial/\")\ncat(\"12\\n\\n\\n\\n\")\nrun.multi(lipids.data,5:14,misspatterns.multi,lipids.data[,14],conv.limit=1.2,nruns=5000,model=\"logit\",path=\"e:/multiple treatments/multinomial/\")\ncat(\"13\\n\\n\\n\\n\")\nrun.multi(lipids.data[1:8,], c(8,10), ,lipids.data[1:8,11], conv.limit=1.2,nruns=5000,model=\"baseline\",path=\"e:/multiple treatments/multinomial/\",covs=4)\ncat(\"14\\n\\n\\n\\n\")\nrun.multi(lipids.data[1:34,],c(8,10), ,lipids.data[1:34,11],conv.limit=1.2,nruns=5000,model=\"baseline\",path=\"e:/multiple treatments/multinomial/\",covs=4)\ncat(\"15\\n\\n\\n\\n\")\nrun.multi(lipids.data[c(1:8,15,16,19:22,27,28),], c(8,10,12),,lipids.data[c(1:8,15,16,19:22,27,28),8] + lipids.data[c(1:8,15,16,19:22,27,28),10] + lipids.data[c(1:8,15,16,19:22,27,28),12], conv.limit=1.2,nruns=5000,model=\"baseline\",path=\"e:/multiple treatments/multinomial/\",covs=4)\ncat(\"16\\n\\n\\n\\n\")\nrun.multi(lipids.data[1:32,],c(8,10,12),,lipids.data[1:32,8]+lipids.data[1:32,10]+lipids.data[1:32,12],conv.limit=1.2,nruns=5000,model=\"baseline\",path=\"e:/multiple treatments/multinomial/\")\ncat(\"17\\n\\n\\n\\n\")\nrun.multi(lipids.data[c(1:8,19:22),],5:14,misspatterns2.multi,lipids.data[c(1:8,19:22),14],conv.limit=1.2,nruns=5000,model=\"baseline\",path=\"e:/multiple treatments/multinomial/\",covs=4)\ncat(\"18\\n\\n\\n\\n\")\nrun.multi(lipids.data,5:14,misspatterns.multi,lipids.data[,14],conv.limit=1.2,nruns=5000,model=\"baseline\",path=\"e:/multiple treatments/multinomial/\",covs=4)\ncat(\"19\\n\\n\\n\\n\")\nrun.multi(lipids.data[1:8,], c(8,10), ,lipids.data[1:8,11], conv.limit=1.2,nruns=5000,model=\"logit\",path=\"e:/multiple treatments/multinomial/\",covs=4)\ncat(\"20\\n\\n\\n\\n\")\nrun.multi(lipids.data[1:34,], c(8,10), ,lipids.data[1:34,11], conv.limit=1.2,nruns=5000,model=\"logit\",path=\"e:/multiple treatments/multinomial/\",covs=4)\ncat(\"21\\n\\n\\n\\n\")\nrun.multi(lipids.data[c(1:8,15,16,19:22,27,28),], c(8,10,12),,lipids.data[c(1:8,15,16,19:22,27,28),8] + lipids.data[c(1:8,15,16,19:22,27,28),10] + lipids.data[c(1:8,15,16,19:22,27,28),12], conv.limit=1.2,nruns=5000,model=\"logit\",path=\"e:/multiple treatments/multinomial/\",covs=4)\ncat(\"22\\n\\n\\n\\n\")\nrun.multi(lipids.data[1:32,],c(8,10,12),,lipids.data[1:32,8]+lipids.data[1:32,10]+lipids.data[1:32,12],conv.limit=1.2,nruns=5000,model=\"logit\",path=\"e:/multiple treatments/multinomial/\",covs=4)\ncat(\"23\\n\\n\\n\\n\")\nrun.multi(lipids.data[c(1:8,15,16,19:22,27,28),],5:14,misspatterns2.multi,lipids.data[c(1:8,15,16,19:22,27,28),14],conv.limit=1.2,nruns=5000,model=\"logit\",path=\"e:/multiple treatments/multinomial/\",covs=4)\ncat(\"24\\n\\n\\n\\n\")\nrun.multi(lipids.data,5:14,misspatterns.multi,lipids.data[,14],conv.limit=1.2,nruns=5000,model=\"logit\",path=\"e:/multiple treatments/multinomial/\",covs=4)\ncat(\"25\\n\\n\\n\\n\")\nrun.multi(lipids.data[1:8,], c(8,10), ,lipids.data[1:8,11], conv.limit=1.2,nruns=5000,model=\"baseline\",path=\"e:/multiple 
treatments/multinomial/\",covs=c(2,4))\ncat(\"26\\n\\n\\n\\n\")\nrun.multi(lipids.data[1:34,],c(8,10), ,lipids.data[1:34,11],conv.limit=1.2,nruns=5000,model=\"baseline\",path=\"e:/multiple treatments/multinomial/\",covs=c(2,4))\ncat(\"27\\n\\n\\n\\n\")\nrun.multi(lipids.data[c(1:8,15,16,19:22,27,28),], c(8,10,12),,lipids.data[c(1:8,15,16,19:22,27,28),8] + lipids.data[c(1:8,15,16,19:22,27,28),10] + lipids.data[c(1:8,15,16,19:22,27,28),12], conv.limit=1.2,nruns=5000,model=\"logit\",path=\"e:/multiple treatments/multinomial/\",covs=c(2,4))\ncat(\"28\\n\\n\\n\\n\")\nrun.multi(lipids.data[1:32,],c(8,10,12),,lipids.data[1:32,8]+lipids.data[1:32,10]+lipids.data[1:32,12],conv.limit=1.2,nruns=5000,model=\"logit\",path=\"e:/multiple treatments/multinomial/\",covs=c(2,4))\ncat(\"29\\n\\n\\n\\n\")\nrun.multi(lipids.data[c(1:8,19:22),],5:14,misspatterns2.multi,lipids.data[c(1:8,19:22),14],conv.limit=1.2,nruns=5000,model=\"baseline\",path=\"e:/multiple treatments/multinomial/\",covs=c(2,4))\ncat(\"30\\n\\n\\n\\n\")\n#run.multi(lipids.data,5:14,misspatterns.multi,lipids.data[,14],conv.limit=1.2,nruns=5000,model=\"baseline\",path=\"e:/multiple treatments/multinomial/\",covs=c(2,4))\ncat(\"31\\n\\n\\n\\n\")\nrun.multi(lipids.data[1:8,], c(8,10), ,lipids.data[1:8,11], conv.limit=1.2,nruns=5000,model=\"logit\",path=\"e:/multiple treatments/multinomial/\",covs=c(2,4))\ncat(\"32\\n\\n\\n\\n\")\nrun.multi(lipids.data[1:34,],c(8,10), ,lipids.data[1:34,11],conv.limit=1.2,nruns=5000,model=\"logit\",path=\"e:/multiple treatments/multinomial/\",covs=c(2,4))\ncat(\"33\\n\\n\\n\\n\")\nrun.multi(lipids.data[c(1:8,15,16,19:22,27,28),], c(8,10,12),,lipids.data[c(1:8,15,16,19:22,27,28),8] + lipids.data[c(1:8,15,16,19:22,27,28),10] + lipids.data[c(1:8,15,16,19:22,27,28),12], conv.limit=1.2,nruns=5000,model=\"logit\",path=\"e:/multiple treatments/multinomial/\",covs=c(2,4))\ncat(\"34\\n\\n\\n\\n\")\nrun.multi(lipids.data[1:32,],c(8,10,12),,lipids.data[1:32,8]+lipids.data[1:32,10]+lipids.data[1:32,12],conv.limit=1.2,nruns=5000,model=\"logit\",path=\"e:/multiple treatments/multinomial/\",covs=c(2,4))\ncat(\"35\\n\\n\\n\\n\")\nrun.multi(lipids.data[c(1:8,19:22),],5:14,misspatterns2.multi,lipids.data[c(1:8,19:22),14],conv.limit=1.2,nruns=5000,model=\"logit\",path=\"e:/multiple treatments/multinomial/\",covs=c(2,4))\ncat(\"36\\n\\n\\n\\n\")\n#run.multi(lipids.data,5:14,misspatterns.multi,lipids.data[,14],conv.limit=1.2,nruns=5000,model=\"logit\",path=\"e:/multiple treatments/multinomial/\",covs=c(2,4))\ncat(\"37\\n\\n\\n\\n\")\n}\n\n" }, { "alpha_fraction": 0.4961240291595459, "alphanum_fraction": 0.5059431791305542, "avg_line_length": 32.66666793823242, "blob_id": "67eb7b7e3e63ba118fa0724b113be931c5c3b421", "content_id": "c6ad6616f07396084f723017097281187715aeb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1935, "license_type": "no_license", "max_line_length": 127, "num_lines": 57, "path": "/src/R/openmetar/R/classes.r", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "####################################\n# #\n# OpenMeta[Analyst] #\n# ---- #\n# classes.r # \n# contains class definitions for #\n# OpenMeta. # \n# # \n# (note that classes in R are #\n# basically structs) #\n####################################\n\n#\n# This is the super (parent) class. 
All Open Meta data objects should inherit from this\n# type.\nsetClass(\"OMData\", representation(study.names=\"character\", notes=\"character\", \n years=\"integer\", covariates=\"list\"))\n\n####\n# BinaryData type\n#\nsetClass(\"BinaryData\", \n representation(g1O1=\"numeric\", g1O2=\"numeric\", g2O1=\"numeric\", g2O2=\"numeric\",\n y=\"numeric\", SE=\"numeric\",\n g1.name=\"character\", g2.name=\"character\"), \n contains=\"OMData\")\n \n\n####\n# DiagnosticData type\n# \nsetClass(\"DiagnosticData\", \n representation(TP=\"numeric\", FN=\"numeric\", TN=\"numeric\", FP=\"numeric\", \n y=\"numeric\", SE=\"numeric\", g1.name=\"character\"), \n contains=\"OMData\")\n \n####\n# ContinuousData type\n# \nsetClass(\"ContinuousData\", \n representation(N1=\"numeric\", mean1=\"numeric\", sd1=\"numeric\",\n N2=\"numeric\", mean2=\"numeric\", sd2=\"numeric\",\n y=\"numeric\", SE=\"numeric\",\n g1.name=\"character\", g2.name=\"character\"), \n contains=\"OMData\")\n \n \n#\n# The specificiation class contains parameters, etc., for the method to be run\n#\nsetClass(\"AnalysisSpecification\", \n representation(parameters=\"data.frame\"))\n\n#\n# The covariate class contains covariate values.\n#\nsetClass(\"CovariateValues\", representation(cov.name=\"character\", cov.vals=\"vector\", cov.type=\"character\", ref.var=\"character\"))\n " }, { "alpha_fraction": 0.6382189393043518, "alphanum_fraction": 0.6604823470115662, "avg_line_length": 32.36170196533203, "blob_id": "b6c1dc0ebb13e6715e6cf714333fbec14b2b4d97", "content_id": "6ec7130b83c2bb100b27158760ce7886a98ac4eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1617, "license_type": "no_license", "max_line_length": 378, "num_lines": 47, "path": "/src/R/HSROC/man/beta.parameter.Rd", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "\\name{beta.parameter}\r\n\\alias{beta.parameter}\r\n\r\n\\title{A function that returns the shape parameters of the beta distribution}\r\n\\description{\r\n When provided the minimum and maximum values corresponding to the prior information on a parameter (within [0, 1]), the function returns the shape parameters of the beta distribution, namely \\eqn{\\alpha}{alpha} and \\eqn{\\beta}{beta}, that covers this range. \r\n}\r\n\r\n\\usage{\r\nbeta.parameter(low, up) \r\n}\r\n\r\n\r\n\\arguments{ \r\n \\item{low}{numeric argument. Lower bound of the range}\r\n \\item{up}{numeric argument. Upper bound of the range}\r\n} \r\n\r\n\\value{\r\nIt returns a list object consisting of the shape parameters \\code{alpha} and \\code{beta} of the Beta distribution such that \\deqn{\\frac{\\alpha}{\\alpha + \\beta} = \\frac{low + up}{2}}{alpha/(alpha + beta) = (low + up)/2} and \\deqn{\\frac{\\alpha \\beta}{(\\alpha + \\beta)^2(\\alpha + \\beta + 1)} = \\frac{up - low}{4}}{(alpha*beta)/( (alpha + beta)^2 (alpha + beta + 1)) = (up - low)/4}\r\n\r\nThe function has been built such that the particular case where \\eqn{low = up = 1}{low = up = 1} will return \\eqn{\\alpha = \\beta = 1}{alpha = beta = 1}. \r\n}\r\n\r\n\r\n\\details{\r\n\r\nThe \\code{low} argument must always be less than the \\code{up} argument. Also, both \\code{low} and \\code{up} arguments must remain within [0, 1].\r\n\r\n\r\n}\r\n\r\n \r\n\\examples{\r\n \r\n\r\n\\dontrun{beta.parameter(-1, 0.5)} #Returns error!\r\n\\dontrun{beta.parameter(0, 0)} #Not allowed. 
Returns error!\r\n\\dontrun{beta.parameter(0.75, 0.25)} #Returns error!\r\n\r\nbeta.parameter(0, 1)\r\nbeta.parameter(0.5, 1) \r\nbeta.parameter(0.1, 0.7) \r\n\r\n}\r\n\r\n\\keyword{methods}\r\n\r\n" }, { "alpha_fraction": 0.701932966709137, "alphanum_fraction": 0.7238658666610718, "avg_line_length": 61.74752426147461, "blob_id": "76544777faf20b80cc3e9a279445c22ee343e03f", "content_id": "6987ee55c23f212a7faf9583721186040f353590", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12675, "license_type": "no_license", "max_line_length": 136, "num_lines": 202, "path": "/src/forms/ui_data_type_page.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'data_type_page.ui'\n#\n# Created: Thu Jun 27 10:21:34 2013\n# by: PyQt4 UI code generator 4.10.1\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_DataTypePage(object):\n def setupUi(self, DataTypePage):\n DataTypePage.setObjectName(_fromUtf8(\"DataTypePage\"))\n DataTypePage.resize(450, 350)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(DataTypePage.sizePolicy().hasHeightForWidth())\n DataTypePage.setSizePolicy(sizePolicy)\n DataTypePage.setMinimumSize(QtCore.QSize(450, 350))\n DataTypePage.setMaximumSize(QtCore.QSize(450, 350))\n self.verticalLayout = QtGui.QVBoxLayout(DataTypePage)\n self.verticalLayout.setObjectName(_fromUtf8(\"verticalLayout\"))\n self.label_2 = QtGui.QLabel(DataTypePage)\n font = QtGui.QFont()\n font.setUnderline(False)\n self.label_2.setFont(font)\n self.label_2.setObjectName(_fromUtf8(\"label_2\"))\n self.verticalLayout.addWidget(self.label_2)\n self.horizontalLayout_3 = QtGui.QHBoxLayout()\n self.horizontalLayout_3.setObjectName(_fromUtf8(\"horizontalLayout_3\"))\n self.onearm_proportion_Button = QtGui.QToolButton(DataTypePage)\n self.onearm_proportion_Button.setMinimumSize(QtCore.QSize(90, 65))\n self.onearm_proportion_Button.setMaximumSize(QtCore.QSize(90, 65))\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(_fromUtf8(\":/new_dataset/startscreens/proportion.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.onearm_proportion_Button.setIcon(icon)\n self.onearm_proportion_Button.setIconSize(QtCore.QSize(40, 40))\n self.onearm_proportion_Button.setCheckable(True)\n self.onearm_proportion_Button.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)\n self.onearm_proportion_Button.setObjectName(_fromUtf8(\"onearm_proportion_Button\"))\n self.buttonGroup = QtGui.QButtonGroup(DataTypePage)\n self.buttonGroup.setObjectName(_fromUtf8(\"buttonGroup\"))\n self.buttonGroup.addButton(self.onearm_proportion_Button)\n self.horizontalLayout_3.addWidget(self.onearm_proportion_Button)\n self.onearm_mean_Button = QtGui.QToolButton(DataTypePage)\n self.onearm_mean_Button.setMinimumSize(QtCore.QSize(90, 65))\n self.onearm_mean_Button.setMaximumSize(QtCore.QSize(90, 65))\n 
self.onearm_mean_Button.setBaseSize(QtCore.QSize(10, 10))\n icon1 = QtGui.QIcon()\n icon1.addPixmap(QtGui.QPixmap(_fromUtf8(\":/new_dataset/startscreens/mean.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.onearm_mean_Button.setIcon(icon1)\n self.onearm_mean_Button.setIconSize(QtCore.QSize(40, 40))\n self.onearm_mean_Button.setCheckable(True)\n self.onearm_mean_Button.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)\n self.onearm_mean_Button.setObjectName(_fromUtf8(\"onearm_mean_Button\"))\n self.buttonGroup.addButton(self.onearm_mean_Button)\n self.horizontalLayout_3.addWidget(self.onearm_mean_Button)\n self.onearm_single_reg_coef_Button = QtGui.QToolButton(DataTypePage)\n self.onearm_single_reg_coef_Button.setMinimumSize(QtCore.QSize(120, 65))\n self.onearm_single_reg_coef_Button.setMaximumSize(QtCore.QSize(120, 65))\n self.onearm_single_reg_coef_Button.setBaseSize(QtCore.QSize(10, 10))\n icon2 = QtGui.QIcon()\n icon2.addPixmap(QtGui.QPixmap(_fromUtf8(\":/new_dataset/startscreens/single_reg_coef.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.onearm_single_reg_coef_Button.setIcon(icon2)\n self.onearm_single_reg_coef_Button.setIconSize(QtCore.QSize(40, 40))\n self.onearm_single_reg_coef_Button.setCheckable(True)\n self.onearm_single_reg_coef_Button.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)\n self.onearm_single_reg_coef_Button.setObjectName(_fromUtf8(\"onearm_single_reg_coef_Button\"))\n self.buttonGroup.addButton(self.onearm_single_reg_coef_Button)\n self.horizontalLayout_3.addWidget(self.onearm_single_reg_coef_Button)\n self.onearm_generic_effect_size_Button = QtGui.QToolButton(DataTypePage)\n self.onearm_generic_effect_size_Button.setMinimumSize(QtCore.QSize(90, 65))\n self.onearm_generic_effect_size_Button.setMaximumSize(QtCore.QSize(90, 65))\n self.onearm_generic_effect_size_Button.setBaseSize(QtCore.QSize(10, 10))\n icon3 = QtGui.QIcon()\n icon3.addPixmap(QtGui.QPixmap(_fromUtf8(\":/new_dataset/startscreens/gen_eff_size.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.onearm_generic_effect_size_Button.setIcon(icon3)\n self.onearm_generic_effect_size_Button.setIconSize(QtCore.QSize(40, 40))\n self.onearm_generic_effect_size_Button.setCheckable(True)\n self.onearm_generic_effect_size_Button.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)\n self.onearm_generic_effect_size_Button.setArrowType(QtCore.Qt.NoArrow)\n self.onearm_generic_effect_size_Button.setObjectName(_fromUtf8(\"onearm_generic_effect_size_Button\"))\n self.buttonGroup.addButton(self.onearm_generic_effect_size_Button)\n self.horizontalLayout_3.addWidget(self.onearm_generic_effect_size_Button)\n spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.horizontalLayout_3.addItem(spacerItem)\n self.verticalLayout.addLayout(self.horizontalLayout_3)\n self.line_2 = QtGui.QFrame(DataTypePage)\n self.line_2.setFrameShape(QtGui.QFrame.HLine)\n self.line_2.setFrameShadow(QtGui.QFrame.Sunken)\n self.line_2.setObjectName(_fromUtf8(\"line_2\"))\n self.verticalLayout.addWidget(self.line_2)\n self.label_4 = QtGui.QLabel(DataTypePage)\n self.label_4.setObjectName(_fromUtf8(\"label_4\"))\n self.verticalLayout.addWidget(self.label_4)\n self.horizontalLayout_2 = QtGui.QHBoxLayout()\n self.horizontalLayout_2.setObjectName(_fromUtf8(\"horizontalLayout_2\"))\n self.twoarm_proportions_Button = QtGui.QToolButton(DataTypePage)\n self.twoarm_proportions_Button.setMinimumSize(QtCore.QSize(90, 65))\n self.twoarm_proportions_Button.setMaximumSize(QtCore.QSize(90, 65))\n 
self.twoarm_proportions_Button.setBaseSize(QtCore.QSize(10, 10))\n icon4 = QtGui.QIcon()\n icon4.addPixmap(QtGui.QPixmap(_fromUtf8(\":/new_dataset/startscreens/proportions.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.twoarm_proportions_Button.setIcon(icon4)\n self.twoarm_proportions_Button.setIconSize(QtCore.QSize(72, 44))\n self.twoarm_proportions_Button.setCheckable(True)\n self.twoarm_proportions_Button.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)\n self.twoarm_proportions_Button.setObjectName(_fromUtf8(\"twoarm_proportions_Button\"))\n self.buttonGroup.addButton(self.twoarm_proportions_Button)\n self.horizontalLayout_2.addWidget(self.twoarm_proportions_Button)\n self.twoarm_means_Button = QtGui.QToolButton(DataTypePage)\n self.twoarm_means_Button.setMinimumSize(QtCore.QSize(90, 65))\n self.twoarm_means_Button.setMaximumSize(QtCore.QSize(90, 65))\n self.twoarm_means_Button.setBaseSize(QtCore.QSize(10, 10))\n icon5 = QtGui.QIcon()\n icon5.addPixmap(QtGui.QPixmap(_fromUtf8(\":/new_dataset/startscreens/means.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.twoarm_means_Button.setIcon(icon5)\n self.twoarm_means_Button.setIconSize(QtCore.QSize(54, 40))\n self.twoarm_means_Button.setCheckable(True)\n self.twoarm_means_Button.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)\n self.twoarm_means_Button.setObjectName(_fromUtf8(\"twoarm_means_Button\"))\n self.buttonGroup.addButton(self.twoarm_means_Button)\n self.horizontalLayout_2.addWidget(self.twoarm_means_Button)\n self.twoarm_smds_Button = QtGui.QToolButton(DataTypePage)\n self.twoarm_smds_Button.setMinimumSize(QtCore.QSize(90, 65))\n self.twoarm_smds_Button.setMaximumSize(QtCore.QSize(90, 65))\n self.twoarm_smds_Button.setBaseSize(QtCore.QSize(10, 10))\n icon6 = QtGui.QIcon()\n icon6.addPixmap(QtGui.QPixmap(_fromUtf8(\":/new_dataset/startscreens/smd.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.twoarm_smds_Button.setIcon(icon6)\n self.twoarm_smds_Button.setIconSize(QtCore.QSize(40, 40))\n self.twoarm_smds_Button.setCheckable(True)\n self.twoarm_smds_Button.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)\n self.twoarm_smds_Button.setObjectName(_fromUtf8(\"twoarm_smds_Button\"))\n self.buttonGroup.addButton(self.twoarm_smds_Button)\n self.horizontalLayout_2.addWidget(self.twoarm_smds_Button)\n spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.horizontalLayout_2.addItem(spacerItem1)\n self.verticalLayout.addLayout(self.horizontalLayout_2)\n self.line_4 = QtGui.QFrame(DataTypePage)\n self.line_4.setFrameShape(QtGui.QFrame.HLine)\n self.line_4.setFrameShadow(QtGui.QFrame.Sunken)\n self.line_4.setObjectName(_fromUtf8(\"line_4\"))\n self.verticalLayout.addWidget(self.line_4)\n self.label_5 = QtGui.QLabel(DataTypePage)\n self.label_5.setObjectName(_fromUtf8(\"label_5\"))\n self.verticalLayout.addWidget(self.label_5)\n self.horizontalLayout = QtGui.QHBoxLayout()\n self.horizontalLayout.setObjectName(_fromUtf8(\"horizontalLayout\"))\n self.diagnostic_Button = QtGui.QToolButton(DataTypePage)\n self.diagnostic_Button.setMinimumSize(QtCore.QSize(90, 65))\n self.diagnostic_Button.setMaximumSize(QtCore.QSize(90, 65))\n self.diagnostic_Button.setBaseSize(QtCore.QSize(10, 10))\n icon7 = QtGui.QIcon()\n icon7.addPixmap(QtGui.QPixmap(_fromUtf8(\":/new_dataset/startscreens/diagnostic.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.diagnostic_Button.setIcon(icon7)\n self.diagnostic_Button.setIconSize(QtCore.QSize(85, 44))\n 
self.diagnostic_Button.setCheckable(True)\n self.diagnostic_Button.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)\n self.diagnostic_Button.setObjectName(_fromUtf8(\"diagnostic_Button\"))\n self.buttonGroup.addButton(self.diagnostic_Button)\n self.horizontalLayout.addWidget(self.diagnostic_Button)\n spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.horizontalLayout.addItem(spacerItem2)\n self.verticalLayout.addLayout(self.horizontalLayout)\n\n self.retranslateUi(DataTypePage)\n QtCore.QMetaObject.connectSlotsByName(DataTypePage)\n\n def retranslateUi(self, DataTypePage):\n DataTypePage.setWindowTitle(_translate(\"DataTypePage\", \"WizardPage\", None))\n DataTypePage.setTitle(_translate(\"DataTypePage\", \"What type of data do you have?\", None))\n self.label_2.setText(_translate(\"DataTypePage\", \"One piece of data from each study or studies with one group\", None))\n self.onearm_proportion_Button.setText(_translate(\"DataTypePage\", \"proportion\", None))\n self.onearm_mean_Button.setText(_translate(\"DataTypePage\", \"mean\", None))\n self.onearm_single_reg_coef_Button.setText(_translate(\"DataTypePage\", \"regression coefficient\", None))\n self.onearm_generic_effect_size_Button.setText(_translate(\"DataTypePage\", \"generic\\n\"\n\"effect size\", None))\n self.label_4.setText(_translate(\"DataTypePage\", \"Data on two or more groups per study\", None))\n self.twoarm_proportions_Button.setText(_translate(\"DataTypePage\", \"proportions\", None))\n self.twoarm_means_Button.setText(_translate(\"DataTypePage\", \"means\", None))\n self.twoarm_smds_Button.setText(_translate(\"DataTypePage\", \"SMD\", None))\n self.label_5.setText(_translate(\"DataTypePage\", \"Data on test performance\", None))\n self.diagnostic_Button.setText(_translate(\"DataTypePage\", \"diagnostic\", None))\n\nimport icons_rc\n" }, { "alpha_fraction": 0.5758829712867737, "alphanum_fraction": 0.5794973373413086, "avg_line_length": 44.95438766479492, "blob_id": "bf6e7a5da35f3f091fd7ca9aa47b265e7ae556da", "content_id": "d83931718afc10feafedfe7840730fd048205299", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 53398, "license_type": "no_license", "max_line_length": 207, "num_lines": 1162, "path": "/src/R/openmetar/R/diagnostic_methods.r", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "#######################################\n# OpenMeta[Analyst] #\n# ---- #\n# diagnostic_methods.r # \n# Facade module; wraps methods #\n# that perform analysis on diagnostic #\n# data in a coherent interface. 
# \n#######################################\n\nlibrary(metafor)\nlibrary(HSROC)\nlibrary(graphics)\n\ndiagnostic.logit.metrics <- c(\"Sens\", \"Spec\", \"PPV\", \"NPV\", \"Acc\")\ndiagnostic.log.metrics <- c(\"PLR\", \"NLR\", \"DOR\")\nbivariate.methods <- c(\"diagnostic.hsroc\", \"diagnostic.bivariate.ml\")\n\nadjust.raw.data <- function(diagnostic.data, params) {\n # adjust raw data by adding a constant to each entry \n TP <- diagnostic.data@TP\n FN <- diagnostic.data@FN \n TN <- diagnostic.data@TN \n FP <- diagnostic.data@FP\n \n if (\"to\" %in% names(params)) {\n if (params$to == \"all\") {\n TP <- TP + params$adjust\n FN <- FN + params$adjust\n TN <- TN + params$adjust\n FP <- FP + params$adjust\n } else if (params$to == \"only0\") {\n product <- TP * FN * TN * FP\n # product equals 0 if at least one entry in a row is 0\n TP[product == 0] <- TP[product == 0] + params$adjust\n FN[product == 0] <- FN[product == 0] + params$adjust\n TN[product == 0] <- TN[product == 0] + params$adjust\n FP[product == 0] <- FP[product == 0] + params$adjust\n } else if (params$to == \"if0all\") {\n if (any(c(TP,FN,TN,FP) == 0)) {\n TP <- TP + params$adjust\n FN <- FN + params$adjust\n TN <- TN + params$adjust\n FP <- FP + params$adjust \n }\n }\n }\n\n data.adj <- list(\"TP\"=TP, \"FN\"=FN, \"TN\"=TN, \"FP\"=FP)\n}\n\ncompute.diag.point.estimates <- function(diagnostic.data, params) {\n # Computes point estimates based on raw data and adds them to diagnostic.data\n data.adj <- adjust.raw.data(diagnostic.data, params)\n terms <- compute.diagnostic.terms(raw.data=data.adj, params)\n metric <- params$measure \n TP <- data.adj$TP\n FN <- data.adj$FN \n TN <- data.adj$TN \n FP <- data.adj$FP\n \n y <- terms$numerator / terms$denominator\n \n diagnostic.data@y <- eval(call(\"diagnostic.transform.f\", params$measure))$calc.scale(y, n)\n \n\t# logit scale SE\n diagnostic.data@SE <- switch(metric,\n Sens = sqrt((1 / TP) + (1 / FN)), \n Spec = sqrt((1 / TN) + (1 / FP)),\n PPV = sqrt((1 / TP) + (1 / FP)),\n NPV = sqrt((1 / TN) + (1 / FN)),\n Acc = sqrt((1 / (TP + TN)) + (1 / (FP + FN))),\n PLR = sqrt((1 / TP) - (1 / (TP + FN)) + (1 / FP) - (1 / (TN + FP))),\n NLR = sqrt((1 / TP) - (1 / (TP + FN)) + (1 / FP) - (1 / (TN + FP))),\n DOR = sqrt((1 / TP) + (1 / FN) + (1 / FP) + (1 / TN)))\n\t# display scale SE\n\n\n diagnostic.data\n}\n\ncompute.diagnostic.terms <- function(raw.data, params) { \n # compute numerator and denominator of diagnostic point estimate.\n metric <- params$measure\n TP <- raw.data$TP\n FN <- raw.data$FN \n TN <- raw.data$TN \n FP <- raw.data$FP\n numerator <- switch(metric,\n # sensitivity\n Sens = TP, \n # specificity\n Spec = TN,\n # pos. predictive value\n PPV = TP,\n #neg. predictive value\n NPV = TN,\n # accuracy\n Acc = TP + TN,\n # positive likelihood ratio\n PLR = TP * (TN + FP), \n # negative likelihood ratio\n NLR = FN * (TN + FP),\n # diagnostic odds ratio\n DOR = TP * TN)\n \n denominator <- switch(metric,\n # sensitivity\n Sens = TP + FN, \n # specificity\n Spec = TN + FP,\n # pos. predictive value\n PPV = TP + FP,\n #neg. 
predictive value\n NPV = TN + FN,\n # accuracy\n Acc = TP + TN + FP + FN,\n # positive likelihood ratio\n PLR = FP * (TP + FN), \n # negative likelihood ratio\n NLR = TN * (TP + FN),\n # diagnostic odds ratio\n DOR = FP * FN) \n\n terms <- list(\"numerator\"=numerator, \"denominator\"=denominator) \n}\n\ndiagnostic.transform.f <- function(metric.str){\n display.scale <- function(x, ...){\n if (metric.str %in% diagnostic.log.metrics){\n exp(x)\n } else if (metric.str %in% diagnostic.logit.metrics) {\n invlogit(x)\n } else {\n # identity function\n x\n }\n }\n \n calc.scale <- function(x, ...){\n if (metric.str %in% diagnostic.log.metrics){\n log(x)\n } else if (metric.str %in% diagnostic.logit.metrics){\n logit(x)\n } else {\n # identity function\n x\n }\n }\n list(display.scale = display.scale, calc.scale = calc.scale)\n}\n\nget.res.for.one.diag.study <- function(diagnostic.data, params){\n # this method can be called when there is only one study to \n # get the point estimate and lower/upper bounds.\n \n ######\n ## Do not check here if the object is NA; we want to recompute the \n ## data here regardless, and the program will throwup on this check if \n ## the y estimate doesn't exist on the object.\n #####\n diagnostic.data <- compute.diag.point.estimates(diagnostic.data, params)\n \n y <- diagnostic.data@y\n se <- diagnostic.data@SE\n\n # note: conf.level is given as, e.g., 95, rather than .95.\n alpha <- 1.0-(params$conf.level/100.0)\n mult <- abs(qnorm(alpha/2.0))\n ub <- y + mult*se\n lb <- y - mult*se\n # we make lists to comply with the get.overall method\n res <- list(\"b\"=c(y), \"ci.lb\"=lb, \"ci.ub\"=ub, \"se\"=se) \n res\n}\n\n###################################################\n# multiple diagnostic methods #\n###################################################\nmultiple.diagnostic <- function(fnames, params.list, diagnostic.data) {\n\n # wrapper for applying multiple diagnostic functions and metrics \n\n ####\n # fnames -- names of diagnostic meta-analytic functions to call\n # params.list -- parameter lists to be passed along to the functions in\n # fnames\n # diagnostic.data -- the (diagnostic data) that is to be analyzed \n ###\n metrics <- c()\n results <- list()\n pretty.names <- diagnostic.fixed.inv.var.pretty.names()\n sens.spec.outpath <- c()\n for (count in 1:length(params.list)) {\n metrics <- c(metrics, params.list[[count]]$measure)\n if (params.list[[count]]$measure==\"Sens\") {\n sens.index <- count\n\t\t\tpng(filename=paste(params.list[[count]]$fp_outpath,\"INTER\",sep=\"\")) # to fix windows popping out at you issue\n }\n if (params.list[[count]]$measure==\"Spec\") {\n spec.index <- count\n\t\t\tpng(filename=paste(params.list[[count]]$fp_outpath,\"INTER\",sep=\"\")) # to fix windows popping out at you issue\n }\n if (params.list[[count]]$measure==\"PLR\") {\n plr.index <- count\n\t\t\tpng(filename=paste(params.list[[count]]$fp_outpath,\"INTER\",sep=\"\")) # to fix windows popping out at you issue\n }\n if (params.list[[count]]$measure==\"NLR\") {\n nlr.index <- count\n\t\t\tpng(filename=paste(params.list[[count]]$fp_outpath,\"INTER\",sep=\"\")) # to fix windows popping out at you issue\n }\n }\n \n images <- c()\n image.order <- c()\n plot.names <- c()\n plot.params.paths <- c()\n plot.pdfs.paths <- c() # sometimes we want to just output pdfs at run-time\n remove.indices <- c()\n\treferences <- c()\n\n if ((\"Sens\" %in% metrics) & (\"Spec\" %in% metrics)) {\n ####\n # we are running an analysis for sens *and* spec;\n # has a bivariate method been 
selected??\n fname <- fnames[sens.index]\n if (fname %in% bivariate.methods){\n params.sens <- params.list[[sens.index]] # we could pick either here\n biv.results <- eval(call(fname, diagnostic.data, params.sens))\n results <- c(results, biv.results$Summary)\n images <- c(images, biv.results$images)\n image.order <- append.image.order(image.order, biv.results)\n remove.indices <- c(sens.index, spec.index)\n\t\t\treferences <- c(references, biv.results$Reference)\n } else {\n ###\n # we're not running bivariate; proceed as usual\n # create side-by-side forest plots for sens and spec.\n params.sens <- params.list[[sens.index]]\n params.spec <- params.list[[spec.index]]\n params.sens$create.plot <- FALSE\n params.spec$create.plot <- FALSE\n params.tmp <- list(\"left\"=params.sens, \"right\"=params.spec)\n \n diagnostic.data.sens <- compute.diag.point.estimates(diagnostic.data, params.sens)\n diagnostic.data.spec <- compute.diag.point.estimates(diagnostic.data, params.spec)\n diagnostic.data.all <- list(\"left\"=diagnostic.data.sens, \"right\"=diagnostic.data.spec)\n \n results.sens <- eval(call(fname, diagnostic.data.sens, params.sens))\n results.spec <- eval(call(fname, diagnostic.data.spec, params.spec))\n summary.sens <- list(\"Summary\"=results.sens$Summary)\n names(summary.sens) <- paste(eval(parse(text=paste(\"pretty.names$measure$\", params.sens$measure,sep=\"\"))), \" Summary\", sep=\"\")\n summary.spec <- list(\"Summary\"=results.spec$Summary)\n names(summary.spec) <- paste(eval(parse(text=paste(\"pretty.names$measure$\", params.spec$measure,sep=\"\"))), \" Summary\", sep=\"\")\n results <- c(results, summary.sens, summary.spec)\n\t\t\t\n\t\t\treferences <- c(references, results.sens$References) # reference for method will be the same for both sens&spec\n \n res.sens <- results.sens$Summary$MAResults\n res.spec <- results.spec$Summary$MAResults\n res <- list(\"left\"=res.sens, \"right\"=res.spec)\n plot.data <- create.side.by.side.plot.data(diagnostic.data.all, params=params.tmp, res=res)\n forest.path <- paste(params.sens$fp_outpath, sep=\"\")\n two.forest.plots(plot.data, outpath=forest.path)\n \n forest.plot.params.path <- save.data(om.data=diagnostic.data.all, res, params=params.tmp, plot.data)\n plot.params.paths.tmp <- c(\"Sensitivity and Specificity Forest Plot\"=forest.plot.params.path)\n plot.params.paths <- c(plot.params.paths, plot.params.paths.tmp)\n images.tmp <- c(\"Sensitivity and Specificity Forest Plot\"=forest.path)\n images <- c(images, images.tmp)\n image.order <- c(image.order, \"Sensitivity and Specificity Forest Plot\")\n plot.names.tmp <- c(\"forest plot\"=\"forest.plot\")\n plot.names <- c(plot.names, plot.names.tmp)\n \n # create SROC plot\n sroc.path <- \"./r_tmp/roc.png\"\n sroc.plot.data <- create.sroc.plot.data(diagnostic.data, params=params.sens)\n sroc.plot(sroc.plot.data, sroc.path)\n # we use the system time as our unique-enough string to store\n # the params object\n sroc.plot.params.path <- save.plot.data(sroc.plot.data)\n plot.params.paths.tmp <- c(\"SROC\"=sroc.plot.params.path)\n plot.params.paths <- c(plot.params.paths, plot.params.paths.tmp)\n images.tmp <- c(\"SROC\"=forest.path)\n images <- c(images, c(\"SROC\"=sroc.path))\n image.order <- c(image.order, \"SROC\")\n plot.names <- c(plot.names, c(\"sroc\"=\"sroc\"))\n remove.indices <- c(sens.index, spec.index)\n }\n }\n \n if ((\"NLR\" %in% metrics) & (\"PLR\" %in% metrics)) {\n # create side-by-side forest plots for NLR and PLR.\n params.nlr <- params.list[[nlr.index]]\n params.plr <- 
params.list[[plr.index]]\n params.nlr$create.plot <- FALSE\n params.plr$create.plot <- FALSE\n params.tmp <- list(\"left\"=params.nlr, \"right\"=params.plr)\n \n fname <- fnames[nlr.index]\n diagnostic.data.nlr <- compute.diag.point.estimates(diagnostic.data, params.nlr)\n diagnostic.data.plr <- compute.diag.point.estimates(diagnostic.data, params.plr)\n diagnostic.data.all <- list(\"left\"=diagnostic.data.nlr, \"right\"=diagnostic.data.plr)\n \n results.nlr <- eval(call(fname, diagnostic.data.nlr, params.nlr))\n results.plr <- eval(call(fname, diagnostic.data.plr, params.plr))\n summary.nlr <- list(\"Summary\"=results.nlr$Summary)\n names(summary.nlr) <- paste(eval(parse(text=paste(\"pretty.names$measure$\", params.nlr$measure,sep=\"\"))), \" Summary\", sep=\"\")\n summary.plr <- list(\"Summary\"=results.plr$Summary)\n names(summary.plr) <- paste(eval(parse(text=paste(\"pretty.names$measure$\", params.plr$measure,sep=\"\"))), \" Summary\", sep=\"\")\n results <- c(results, summary.nlr, summary.plr)\n\t\t\n\t\treferences <- c(references, results.nlr$References) # reference for method will be the same for both nlr&plr\n\t\t\n res.nlr <- results.nlr$Summary$MAResults\n res.plr <- results.plr$Summary$MAResults\n res <- list(\"left\"=res.nlr, \"right\"=res.plr)\n \n plot.data <- create.side.by.side.plot.data(diagnostic.data.all, res=res, params.tmp)\n \n forest.path <- paste(params.nlr$fp_outpath, sep=\"\")\n two.forest.plots(plot.data, outpath=forest.path)\n \n forest.plot.params.path <- save.data(diagnostic.data, res, params=params.tmp, plot.data)\n plot.params.paths.tmp <- c(\"NLR and PLR Forest Plot\"=forest.plot.params.path)\n plot.params.paths <- c(plot.params.paths, plot.params.paths.tmp)\n \n images.tmp <- c(\"NLR and PLR Forest Plot\"=forest.path)\n image.order <- c(image.order, \"NLR and PLR Forest Plot\")\n images <- c(images, images.tmp)\n \n plot.names.tmp <- c(\"forest plot\"=\"forest.plot\")\n plot.names <- c(plot.names, plot.names.tmp)\n \n remove.indices <- c(remove.indices, nlr.index, plr.index)\n\t\t\n\t\tcat(\"end of plr/nlr stuff\")\n }\n\n # remove fnames and params for side-by-side plots\n fnames <- fnames[setdiff(1:length(fnames), remove.indices)]\n params.list <- params.list[setdiff(1:length(params.list), remove.indices)]\n\t\n\t\n\n if (length(params.list) > 0) {\n for (count in 1:length(params.list)) {\n # create ma summaries and single (not side-by-side) forest plots.\n #pretty.names <- eval(call(paste(fnames[count],\".pretty.names\",sep=\"\")))\n diagnostic.data.tmp <- compute.diag.point.estimates(diagnostic.data, params.list[[count]])\n results.tmp <- eval(call(fnames[count], diagnostic.data.tmp, params.list[[count]]))\n images.tmp <- results.tmp$images\n names(images.tmp) <- paste(eval(parse(text=paste(\"pretty.names$measure$\",params.list[[count]]$measure,sep=\"\"))), \" Forest Plot\", sep=\"\")\n images <- c(images, images.tmp)\n image.order <- c(image.order, names(images.tmp))\n plot.params.paths.tmp <- results.tmp$plot_params_paths\n names(plot.params.paths.tmp) <- paste(eval(parse(text=paste(\"pretty.names$measure$\", params.list[[count]]$measure,sep=\"\"))), \" Forest Plot\", sep=\"\")\n plot.params.paths <- c(plot.params.paths, plot.params.paths.tmp)\n plot.names <- c(plot.names, results.tmp$plot_names)\n summary.tmp <- list(\"Summary\"=results.tmp$Summary)\n names(summary.tmp) <- paste(eval(parse(text=paste(\"pretty.names$measure$\",params.list[[count]]$measure,sep=\"\"))), \" Summary\", sep=\"\")\n \n\t\t references <- c(references, 
results.tmp$References)\n\t\t\tresults <- c(results, summary.tmp)\n }\n }\n\n graphics.off()\n results <- c(results, list(\"images\"=images,\n\t\t\t\t\t \"image_order\"=image.order,\n\t\t\t\t\t\t\t \"plot_names\"=plot.names, \n \"plot_params_paths\"=plot.params.paths,\n\t\t\t\t\t\t\t \"References\"=references))\n results\n}\n\nappend.image.order <- function(image.order, results){\n if (\"image_order\" %in% names(results)){\n image.order <- c(image.order, results[[\"image_order\"]])\n } else{\n # just keep the current order\n image.order <- c(image.order, names(results$images))\n }\n image.order\n}\n\n###################################################\n# diagnostic fixed effects #\n###################################################\ndiagnostic.fixed.inv.var <- function(diagnostic.data, params){\n # assert that the argument is the correct type\n if (!(\"DiagnosticData\" %in% class(diagnostic.data))) stop(\"Diagnostic data expected.\")\n results <- NULL\n if (length(diagnostic.data@TP) == 1 || length(diagnostic.data@y) == 1){\n res <- get.res.for.one.diag.study(diagnostic.data, params)\n # Package res for use by overall method.\n summary.disp <- list(\"MAResults\" = res) \n results <- list(\"Summary\"=summary.disp)\n } else {\n # call out to the metafor package\n res<-rma.uni(yi=diagnostic.data@y, sei=diagnostic.data@SE, \n [email protected],\n method=\"FE\", level=params$conf.level,\n digits=params$digits)\n\t\t# GD EXPERIMENTAL#########################\n\t\tres$study.weights <- (1 / res$vi) / sum(1 / res$vi)\n\t\tres$study.names <- [email protected]\n\t\tres$study.years <- diagnostic.data@years\n\t\t#########################################\n # Create list to display summary of results\n model.title <- paste(\"Diagnostic Fixed-Effect Model - Inverse Variance (k = \", res$k, \")\", sep=\"\")\n summary.disp <- create.summary.disp(diagnostic.data, params, res, model.title)\n pretty.names <- diagnostic.fixed.inv.var.pretty.names()\n pretty.metric <- eval(parse(text=paste(\"pretty.names$measure$\", params$measure,sep=\"\")))\n for (count in 1:length(summary.disp$table.titles)) {\n summary.disp$table.titles[count] <- paste(\" \", pretty.metric, \" -\", summary.disp$table.titles[count], sep=\"\")\n }\n # Write results to csv file\n if ((is.null(params$write.to.file)) || params$write.to.file == TRUE) {\n results.path <- paste(\"./r_tmp/diag_fixed_inv_var_\", params$measure, \"_results.csv\", sep=\"\")\n # @TODO Pass in results.path via params\n write.results.to.file(diagnostic.data, params, res, outpath=results.path) \n }\n if ((is.null(params$create.plot)) || params$create.plot == TRUE) {\n # A forest plot will be created unless\n # params.create.plot is set to FALSE.\n forest.path <- paste(params$fp_outpath, sep=\"\")\n plot.data <- create.plot.data.diagnostic(diagnostic.data, params, res)\n changed.params <- plot.data$changed.params\n # list of changed params values\n params.changed.in.forest.plot <- forest.plot(forest.data=plot.data, outpath=forest.path)\n changed.params <- c(changed.params, params.changed.in.forest.plot)\n params[names(changed.params)] <- changed.params\n # dump the forest plot params to disk; return path to\n # this .Rdata for later use\n forest.plot.params.path <- save.data(diagnostic.data, res, params, plot.data)\n\n plot.params.paths <- c(\"Forest Plot\"=forest.plot.params.path)\n images <- c(\"Forest Plot\"=forest.path)\n plot.names <- c(\"forest plot\"=\"forest_plot\")\n\n\t\t\t\n results <- list(\"images\"=images,\n\t\t\t\t\t \"Summary\"=summary.disp,\n 
\"plot_names\"=plot.names, \n \"plot_params_paths\"=plot.params.paths)\n } else {\n results <- list(\"Summary\"=summary.disp)\n } \n }\n\t\n\treferences <- \"this is a placeholder for diagnostic fixed effect inv var reference\"\n\tresults[[\"References\"]] <- references\n\t\n results\n}\n\ndiagnostic.fixed.inv.var.parameters <- function(){\n # parameters\n apply_adjustment_to = c(\"only0\", \"all\")\n\n params <- list(\"conf.level\"=\"float\", \"digits\"=\"int\",\n \"adjust\"=\"float\", \"to\"=apply_adjustment_to)\n\n # default values\n defaults <- list(\"conf.level\"=95, \"digits\"=3, \"adjust\"=.5, \"to\"=\"only0\")\n\n var_order = c(\"conf.level\", \"digits\", \"adjust\", \"to\")\n\n parameters <- list(\"parameters\"=params, \"defaults\"=defaults, \"var_order\"=var_order)\n}\n\ndiagnostic.fixed.inv.var.pretty.names <- function() {\n pretty.names <- list(\"pretty.name\"=\"Diagnostic Fixed-Effect Inverse Variance\", \n \"description\" = \"Performs fixed-effect meta-analysis with inverse variance weighting.\",\n \"conf.level\"=list(\"pretty.name\"=\"Confidence level\", \"description\"=\"Level at which to compute confidence intervals\"), \n \"digits\"=list(\"pretty.name\"=\"Number of digits\", \"description\"=\"Number of digits to display in results\"),\n \"adjust\"=list(\"pretty.name\"=\"Correction factor\", \"description\"=\"Constant c that is added to the entries of a two-by-two table.\"),\n \"to\"=list(\"pretty.name\"=\"Add correction factor to\", \"description\"=\"When Add correction factor is set to \\\"only 0\\\", the correction factor\n is added to all cells of each two-by-two table that contains at leason one zero. When set to \\\"all\\\", the correction factor\n is added to all two-by-two tables if at least one table contains a zero.\"),\n \"measure\"=list(\"Sens\"=\"Sensitivity\", \"Spec\"=\"Specificity\", \"DOR\"=\"Odds Ratio\", \"PLR\"=\"Positive Likelihood Ratio\",\n \"NLR\"=\"Negative Likelihood Ratio\") \n )\n}\n\ndiagnostic.fixed.inv.var.is.feasible <- function(diagnostic.data, metric){\n metric %in% c(\"Sens\", \"Spec\", \"PLR\", \"NLR\", \"DOR\")\n}\n\ndiagnostic.fixed.inv.var.overall <- function(results) {\n # this parses out the overall from the computed result\n res <- results$Summary$MAResults\n}\n\n################################################\n# diagnostic fixed effects -- mantel haenszel #\n################################################\ndiagnostic.fixed.mh <- function(diagnostic.data, params){\n # assert that the argument is the correct type\n if (!(\"DiagnosticData\" %in% class(diagnostic.data))) stop(\"Diagnostic data expected.\") \n results <- NULL\n if (length(diagnostic.data@TP) == 1 || length(diagnostic.data@y) == 1){\n res <- get.res.for.one.diagnostic.study(diagnostic.data, params)\n # Package res for use by overall method.\n summary.disp <- list(\"MAResults\" = res) \n results <- list(\"Summary\"=summary.disp)\n } \n else {\n res <- switch(params$measure,\n\n \"DOR\" = rma.mh(ai=diagnostic.data@TP, bi=diagnostic.data@FN, \n ci=diagnostic.data@FP, di=diagnostic.data@TN, [email protected],\n level=params$conf.level, digits=params$digits, measure=\"OR\",\n add=c(params$adjust, 0), to=c(as.character(params$to), \"none\")),\n \n \"PLR\" = rma.mh(ai=diagnostic.data@TP, bi=diagnostic.data@FN, \n ci=diagnostic.data@FP, di=diagnostic.data@TN, [email protected],\n level=params$conf.level, digits=params$digits, measure=\"RR\",\n add=c(params$adjust, 0), to=c(as.character(params$to), \"none\")),\n \n # For \"NLR\", switch ai with bi, and ci with di\n # in order 
to use rma.mh with measure \"RR\" \n \"NLR\" = rma.mh(ai=diagnostic.data@FN, bi=diagnostic.data@TP, \n ci=diagnostic.data@TN, di=diagnostic.data@FP, [email protected],\n level=params$conf.level, digits=params$digits, measure=\"RR\",\n add=c(params$adjust, 0), to=c(as.character(params$to), \"none\")))\n \n\t\t# GD EXPERIMENTAL#########################\n\t\tres$study.weights <- (1 / res$vi) / sum(1 / res$vi)\n\t\tres$study.names <- [email protected]\n\t\tres$study.years <- diagnostic.data@years\n\t\t#########################################\t\t\n # \n # Create list to display summary of results\n #\n model.title <- \"Diagnostic Fixed-Effect Model - Mantel Haenszel\"\n summary.disp <- create.summary.disp(diagnostic.data, params, res, model.title)\n pretty.names <- diagnostic.fixed.mh.pretty.names()\n pretty.metric <- eval(parse(text=paste(\"pretty.names$measure$\", params$measure,sep=\"\")))\n for (count in 1:length(summary.disp$table.titles)) {\n summary.disp$table.titles[count] <- paste(\" \", pretty.metric, \" -\", summary.disp$table.titles[count], sep=\"\")\n }\n # Write results to csv file\n if ((is.null(params$write.to.file)) || params$write.to.file == TRUE) {\n results.path <- paste(\"./r_tmp/diag_fixed_mh_\", params$measure, \"_results.csv\", sep=\"\")\n # @TODO Pass in results.path via params\n write.results.to.file(diagnostic.data, params, res, outpath=results.path) \n }\n #\n # generate forest plot\n #\n if ((is.null(params$create.plot)) || (params$create.plot == TRUE)) {\n if (is.null(diagnostic.data@y) || is.null(diagnostic.data@SE)) {\n diagnostic.data <- compute.diag.point.estimates(diagnostic.data, params)\n # compute point estimates for plot.data in case they are missing\n }\n forest.path <- paste(params$fp_outpath, sep=\"\")\n plot.data <- create.plot.data.diagnostic(diagnostic.data, params, res)\n changed.params <- plot.data$changed.params\n # list of changed params values\n params.changed.in.forest.plot <- forest.plot(forest.data=plot.data, outpath=forest.path)\n changed.params <- c(changed.params, params.changed.in.forest.plot)\n params[names(changed.params)] <- changed.params\n # dump the forest plot params to disk; return path to\n # this .Rdata for later use\n forest.plot.params.path <- save.data(diagnostic.data, res, params, plot.data)\n \n plot.params.paths <- c(\"Forest Plot\"=forest.plot.params.path)\n images <- c(\"Forest Plot\"=forest.path)\n plot.names <- c(\"forest plot\"=\"forest_plot\")\n results <- list(\"images\"=images,\n\t\t\t\t\t \"Summary\"=summary.disp,\n \"plot_names\"=plot.names, \n \"plot_params_paths\"=plot.params.paths)\n }\n else {\n results <- list(\"Summary\"=summary.disp)\n } \n }\n\t\n\treferences <- \"this is a placeholder for diagnostic fixed effect mh reference\"\n\tresults[[\"References\"]] <- references\n\t\n results\n}\n \ndiagnostic.fixed.mh.parameters <- function(){\n # parameters\n apply_adjustment_to = c(\"only0\", \"all\")\n \n params <- list(\"conf.level\"=\"float\", \"digits\"=\"int\",\n \"adjust\"=\"float\", \"to\"=apply_adjustment_to)\n \n # default values\n defaults <- list(\"conf.level\"=95, \"digits\"=3, \"adjust\"=.5, \"to\"=\"only0\")\n \n var_order = c(\"conf.level\", \"digits\", \"adjust\", \"to\")\n \n # constraints\n parameters <- list(\"parameters\"=params, \"defaults\"=defaults, \"var_order\"=var_order)\n}\n\ndiagnostic.fixed.mh.pretty.names <- function() {\n pretty.names <- list(\"pretty.name\"=\"Diagnostic Fixed-Effect Mantel Haenszel\", \n \"description\" = \"Performs fixed-effect meta-analysis using the Mantel 
Haenszel method.\",\n \"conf.level\"=list(\"pretty.name\"=\"Confidence level\", \"description\"=\"Level at which to compute confidence intervals\"), \n \"digits\"=list(\"pretty.name\"=\"Number of digits\", \"description\"=\"Number of digits to display in results\"),\n \"adjust\"=list(\"pretty.name\"=\"Correction factor\", \"description\"=\"Constant c that is added to the entries of a two-by-two table.\"),\n \"to\"=list(\"pretty.name\"=\"Add correction factor to\", \"description\"=\"When Add correction factor is set to \\\"only 0\\\", the correction factor\n is added to all cells of each two-by-two table that contains at leason one zero. When set to \\\"all\\\", the correction factor\n is added to all two-by-two tables if at least one table contains a zero.\"),\n \"measure\"=list(\"Sens\"=\"Sensitivity\", \"Spec\"=\"Specificity\", \"DOR\"=\"Odds Ratio\", \"PLR\"=\"Positive Likelihood Ratio\",\n \"NLR\"=\"Negative Likelihood Ratio\")\n )\n}\n\ndiagnostic.fixed.mh.is.feasible <- function(diagnostic.data, metric){\n metric %in% c(\"DOR\", \"PLR\", \"NLR\")\n}\n\ndiagnostic.fixed.mh.overall <- function(results) {\n # this parses out the overall from the computed result\n res <- results$Summary$MAResults\n}\n\n##################################################\n# diagnostic fixed effects -- Peto #\n##################################################\ndiagnostic.fixed.peto <- function(diagnostic.data, params){\n # assert that the argument is the correct type\n if (!(\"DiagnosticData\" %in% class(diagnostic.data))) stop(\"Diagnostic data expected.\") \n \n if (length(diagnostic.data@TP) == 1 || length(diagnostic.data@y) == 1){\n res <- get.res.for.one.diagnostic.study(diagnostic.data, params)\n # Package res for use by overall method.\n summary.disp <- list(\"MAResults\" = res) \n results <- list(\"Summary\"=summary.disp)\n }\n else{ \n res <- rma.peto(ai=diagnostic.data@TP, bi=diagnostic.data@FN, \n ci=diagnostic.data@FP, di=diagnostic.data@TN,\n\t\t\t\t\[email protected],\n level=params$conf.level,\n\t\t\t\t\tdigits=params$digits,\n add=c(params$adjust, 0),\n\t\t\t\t\tto=c(as.character(params$to), \"none\"))\n\t# GD EXPERIMENTAL#########################\n\tres$study.weights <- (1 / res$vi) / sum(1 / res$vi)\n\tres$study.names <- [email protected]\n\tres$study.years <- diagnostic.data@years\n\t#########################################\t\t\t\n\t\t\t\n # Corrected values for y and SE\n diagnostic.data@y <- res$yi\n diagnostic.data@SE <- sqrt(res$vi)\n \n # \n # Create list to display summary of results\n #\n model.title <- \"Diagnostic Fixed-Effect Model - Peto\"\n summary.disp <- create.summary.disp(diagnostic.data, params, res, model.title)\n pretty.names <- diagnostic.fixed.peto.pretty.names()\n pretty.metric <- eval(parse(text=paste(\"pretty.names$measure$\", params$measure,sep=\"\")))\n for (count in 1:length(summary.disp$table.titles)) {\n summary.disp$table.titles[count] <- paste(\" \", pretty.metric, \" -\", summary.disp$table.titles[count], sep=\"\")\n }\n \n if (is.null(params$create.plot) || (is.null(params$write.to.file))) {\n if (is.null(diagnostic.data@y) || is.null(diagnostic.data@SE)) {\n # compute point estimates for plot.data in case they are missing\n diagnostic.data <- compute.bin.point.estimates(diagnostic.data, params)\n }\n if (is.null(params$write.to.file)) {\n # Write results and study data to csv files \n res$study.weights <- (1 / res$vi) / sum(1 / res$vi)\n results.path <- paste(\"./r_tmp/diagnostic_fixed_peto_results.csv\")\n # @TODO Pass in results.path via params\n 
#data.path <- paste(\"./r_tmp/diagnostic_fixed_peto_study_data.csv\")\n write.results.to.file(diagnostic.data, params, res, outpath=results.path)\n }\n if (is.null(params$create.plot)) {\n # Create forest plot and list to display summary of results\n metric.name <- pretty.metric.name(as.character(params$measure))\n model.title <- \"Diagnostic Fixed-Effect Model - Peto\\n\\nMetric: Odds Ratio\"\n # Create results display tables\n summary.disp <- create.summary.disp(diagnostic.data, params, res, model.title)\n #\n # generate forest plot \n #\n forest.path <- paste(params$fp_outpath, sep=\"\")\n plot.data <- create.plot.data.diagnostic(diagnostic.data, params, res)\n changed.params <- plot.data$changed.params\n # list of changed params values\n params.changed.in.forest.plot <- forest.plot(forest.data=plot.data, outpath=forest.path)\n changed.params <- c(changed.params, params.changed.in.forest.plot)\n params[names(changed.params)] <- changed.params\n # dump the forest plot params to disk; return path to\n # this .Rdata for later use\n forest.plot.params.path <- save.data(diagnostic.data, res, params, plot.data)\n #\n # Now we package the results in a dictionary (technically, a named \n # vector). In particular, there are two fields that must be returned; \n # a dictionary of images (mapping titles to image paths) and a list of texts\n # (mapping titles to pretty-printed text). In this case we have only one \n # of each. \n # \n plot.params.paths <- c(\"Forest Plot\"=forest.plot.params.path)\n images <- c(\"Forest Plot\"=forest.path)\n plot.names <- c(\"forest plot\"=\"forest_plot\")\n results <- list(\"images\"=images,\n\t\t\t\t \"Summary\"=summary.disp,\n \"plot_names\"=plot.names,\n\t\t\t\t\t\t\"plot_params_paths\"=plot.params.paths)\n }\n }\n else {\n results <- list(\"Summary\"=res)\n } \n }\n \n references <- \"this is a placeholder for diagnostic fixed peto reference\"\n results[[\"References\"]] <- references\n \n results\n}\n\ndiagnostic.fixed.peto.parameters <- function(){\n # parameters\n apply_adjustment_to = c(\"only0\", \"all\")\n \n params <- list( \"conf.level\"=\"float\", \"digits\"=\"int\",\n \"adjust\"=\"float\", \"to\"=apply_adjustment_to)\n \n # default values\n defaults <- list(\"conf.level\"=95, \"digits\"=3, \"adjust\"=.5, \"to\"=\"only0\")\n \n var_order = c(\"conf.level\", \"digits\", \"adjust\", \"to\")\n \n parameters <- list(\"parameters\"=params, \"defaults\"=defaults, \"var_order\"=var_order)\n}\n\ndiagnostic.fixed.peto.pretty.names <- function() {\n pretty.names <- list(\"pretty.name\"=\"Diagnostic Fixed-Effect Peto\", \n \"description\" = \"Performs fixed-effect meta-analysis using the Peto method.\",\n \"conf.level\"=list(\"pretty.name\"=\"Confidence level\", \"description\"=\"Level at which to compute confidence intervals\"), \n \"digits\"=list(\"pretty.name\"=\"Number of digits\", \"description\"=\"Number of digits to display in results\"),\n \"adjust\"=list(\"pretty.name\"=\"Correction factor\", \"description\"=\"Constant c that is added to the entries of a two-by-two table.\"),\n \"to\"=list(\"pretty.name\"=\"Add correction factor to\", \"description\"=\"When Add correction factor is set to \\\"only 0\\\", the correction factor\n is added to all cells of each two-by-two table that contains at leason one zero. 
When set to \\\"all\\\", the correction factor\n is added to all two-by-two tables if at least one table contains a zero.\")\n )\n}\n\ndiagnostic.fixed.peto.is.feasible <- function(diagnostic.data, metric){\n # only feasible if we have raw (2x2) data for all studies\n # and the metric is `DOR'\n metric == \"DOR\" &&\n length(diagnostic.data@TP)==length(diagnostic.data@FN) &&\n length(diagnostic.data@FN)==length(diagnostic.data@FP) &&\n length(diagnostic.data@FP)==length(diagnostic.data@TN) &&\n length(diagnostic.data@TP) > 0\n}\n\ndiagnostic.fixed.peto.overall <- function(results) {\n # this parses out the overall from the computed result\n res <- results$Summary\n}\n\n##################################\n# diagnostic random effects #\n##################################\ndiagnostic.random <- function(diagnostic.data, params){\n # assert that the argument is the correct type\n if (!(\"DiagnosticData\" %in% class(diagnostic.data))) stop(\"Diagnostic data expected.\")\n \n results <- NULL\n if (length(diagnostic.data@TP) == 1 || length(diagnostic.data@y) == 1){\n res <- get.res.for.one.diag.study(diagnostic.data, params)\n # Package res for use by overall method.\n summary.disp <- list(\"MAResults\" = res) \n results <- list(\"Summary\"=summary.disp)\n } else {\n # call out to the metafor package\n res<-rma.uni(yi=diagnostic.data@y, sei=diagnostic.data@SE, \n [email protected],\n method=params$rm.method, level=params$conf.level,\n digits=params$digits)\n\n\t\t# GD EXPERIMENTAL#########################\n\t\tweights <- 1 / (res$vi + res$tau2)\n res$study.weights <- weights / sum(weights)\n\t\tres$study.names <- [email protected]\n\t\tres$study.years <- diagnostic.data@years\n\t\t#########################################\n\t\t \n # Create list to display summary of results\n model.title <- paste(\"Diagnostic Random-Effects Model (k = \", res$k, \")\", sep=\"\")\n summary.disp <- create.summary.disp(diagnostic.data, params, res, model.title)\n pretty.names <- diagnostic.random.pretty.names()\n pretty.metric <- eval(parse(text=paste(\"pretty.names$measure$\", params$measure,sep=\"\")))\n for (count in 1:length(summary.disp$table.titles)) {\n summary.disp$table.titles[count] <- paste(pretty.metric, \" -\", summary.disp$table.titles[count], sep=\"\")\n }\n # Write results and study data to csv files\n if ((is.null(params$write.to.file)) || params$write.to.file == TRUE) {\n results.path <- paste(\"./r_tmp/diag_random_\", params$measure, \"_results.csv\", sep=\"\")\n # @TODO Pass in results.path via params\n write.results.to.file(diagnostic.data, params, res, outpath=results.path)\n }\n #\n # generate forest plot \n #\n if ((is.null(params$create.plot)) || (params$create.plot == TRUE)) {\n forest.path <- paste(params$fp_outpath, sep=\"\")\n plot.data <- create.plot.data.diagnostic(diagnostic.data, params, res)\n changed.params <- plot.data$changed.params\n # list of changed params values\n params.changed.in.forest.plot <- forest.plot(forest.data=plot.data, outpath=forest.path)\n changed.params <- c(changed.params, params.changed.in.forest.plot)\n params[names(changed.params)] <- changed.params\n # update params values\n # we use the system time as our unique-enough string to store\n # the params object\n forest.plot.params.path <- save.data(diagnostic.data, res, params, plot.data)\n \n plot.params.paths <- c(\"Forest Plot\"=forest.plot.params.path)\n images <- c(\"Forest Plot\"=forest.path)\n plot.names <- c(\"forest plot\"=\"forest_plot\")\n\t\t\t\n\t\t\t\n results <- 
list(\"images\"=images,\n\t\t\t\t\t \"Summary\"=summary.disp,\n \"plot_names\"=plot.names, \n \"plot_params_paths\"=plot.params.paths)\n }\n else {\n results <- list(\"Summary\"=summary.disp)\n } \n }\n\t\n\treferences <- \"this is a placeholder for diagnostic random reference\"\n\tresults[[\"References\"]] <- references\n\t\n results\n}\n\ndiagnostic.random.parameters <- function(){\n apply.adjustment.to = c(\"only0\", \"all\")\n rm.method.ls <- c(\"HE\", \"DL\", \"SJ\", \"ML\", \"REML\", \"EB\")\n params <- list(\"rm.method\"=rm.method.ls, \"conf.level\"=\"float\", \"digits\"=\"int\",\n \"adjust\"=\"float\", \"to\"=apply.adjustment.to)\n \n # default values\n defaults <- list(\"rm.method\"=\"DL\", \"conf.level\"=95, \"digits\"=3, \n \"adjust\"=.5, \"to\"=\"only0\")\n \n var.order <- c(\"rm.method\", \"conf.level\", \"digits\", \"adjust\", \"to\")\n parameters <- list(\"parameters\"=params, \"defaults\"=defaults, \"var_order\"=var.order)\n}\n\ndiagnostic.random.pretty.names <- function() {\n\t# sort of redundant to have both this and rm_method_ls but whatever for now...\n\trm_method_names <- list(\n\t\t\tHE=\"Hedges-Olkin\",\n\t\t\tDL = \"DerSimonian-Laird\",\n\t\t\tSJ = \"Sidik-Jonkman\",\n\t\t\tML = \"Maximum Likelihood\",\n\t\t\tREML = \"Restricted Maximum Likelihood\", \n\t\t\tEB = \"Empirical Bayes\")\n\t\n pretty.names <- list(\"pretty.name\"=\"Diagnostic Random-Effects\", \n \"description\" = \"Performs random-effects meta-analysis.\",\n \"rm.method\"=list(\"pretty.name\"=\"Random-Effects method\", \"description\"=\"Method for estimating between-studies heterogeneity\", \"rm.method.names\"=rm_method_names), \n \"conf.level\"=list(\"pretty.name\"=\"Confidence level\", \"description\"=\"Level at which to compute confidence intervals\"), \n \"digits\"=list(\"pretty.name\"=\"Number of digits of precision to display\", \"description\"=\"Number of digits to display in results\"),\n \"adjust\"=list(\"pretty.name\"=\"Correction factor\", \"description\"=\"Constant c that is added to the entries of a two-by-two table.\"),\n \"to\"=list(\"pretty.name\"=\"Cells to which correction factor should be added\", \"description\"=\"When Add correction factor is set to \\\"only 0\\\", the correction factor\n is added to all cells of each two-by-two table that contains at leason one zero. 
When set to \\\"all\\\", the correction factor\n is added to all two-by-two tables if at least one table contains a zero.\"),\n \"measure\"=list(\"Sens\"=\"Sensitivity\", \"Spec\"=\"Specificity\", \"DOR\"=\"Odds Ratio\", \"PLR\"=\"Positive Likelihood Ratio\",\n \"NLR\"=\"Negative Likelihood Ratio\")\n )\n}\n\ndiagnostic.random.is.feasible <- function(diagnostic.data, metric){\n metric %in% c(\"Sens\", \"Spec\", \"PLR\", \"NLR\", \"DOR\") \n}\ndiagnostic.random.overall <- function(results) {\n # this parses out the overall from the computed result\n res <- results$Summary$MAResults\n}\n\n##################################\n# diagnostic hsroc #\n##################################\ndiagnostic.hsroc <- function(diagnostic.data, params){\n library(HSROC)\n prev.working.dir <- getwd()\n\n # step into r_tmp\n setwd(\"r_tmp\")\n\n ####\n # first we create a unique directory\n unique.name <- as.character(as.numeric(Sys.time()))\n out.dir <- paste(getwd(), unique.name, sep=\"/\")\n dir.create(out.dir)\n\n #### \n # convert the diagnostic data to a format consumable\n # by the HSROC lib, this means a data frame\n # with the following columns:\n # ++ +- -+ --\n diag.data.frame <- \n data.frame(TP=diagnostic.data@TP, FP=diagnostic.data@FP, FN=diagnostic.data@FN, TN=diagnostic.data@TN)\n\n ### set up and run the three chains\n chain.out.dirs <- c()\n for (chain.i in 1:params$num.chains){\n chain.out.dir <- paste(out.dir, \"/chain_\", chain.i, sep=\"\")\n dir.create(chain.out.dir)\n setwd(chain.out.dir)\n\n # TODO parameterize lambda, theta priors\n res <- try(HSROC(data=diag.data.frame, iter.num=params$num.iters, \n prior_LAMBDA=c(params$lambda.lower, params$lambda.upper), \n prior_THETA=c(params$theta.lower, params$theta.upper), \n path=chain.out.dir))\n\n # Put in try block in case HSROC fails\n if (class(res)==\"try-error\") {\n stop(\"Sorry -- HSROC failed during sampling. Perhaps try running it again?\")\n }\n chain.out.dirs <- c(chain.out.dirs, chain.out.dir)\n }\n\n hsroc.sum <- HSROCSummary(data=diag.data.frame , burn_in=params$burn.in, Thin=params$thin, print_plot=T ,\n path=out.dir, chain=chain.out.dirs )\n\n #### \n # pull out the summary\n summary <- c(hsroc.sum[1], hsroc.sum[2])\n\n ####\n # and the images\n images <- list()\n image.list <- hsroc.sum$image.list\n\n for (img.name in names(image.list)){\n cur.img.name <- image.list[[img.name]]\n image.list[[img.name]] <- paste(out.dir, cur.img.name, sep=\"/\")\n }\n\n images <- image.list\n\n # reset the working directory\n setwd(prev.working.dir)\n\n # we don't want the SROC plot to be mixed in with \n # the density plots...\n roc.plot.name <- \"Summary ROC\"\n image.names <- names(images)\n image.order <- append(roc.plot.name, image.names[image.names!=roc.plot.name])\n\treferences <- \"HSROC: C. M. Rutter and C. A. Gatsonis. A hierarchical regression approach to meta-analysis of diagnostic accuracy evaluations. 
Statistics in Medicine, 20(19):2865-2884, 2001.\"\n results <- list(\"images\"=images,\n\t\t\t \"image_order\"=image.order,\n\t\t\t\t\t\"Summary\"=summary,\n\t\t\t\t\t\"References\"=references)\n\n}\n\n\ndiagnostic.hsroc.parameters <- function(){\n params <- list(\"num.iters\"=\"float\", \"burn.in\"=\"float\", \"thin\"=\"float\", \n \"theta.lower\"=\"float\", \"theta.upper\"=\"float\",\n \"lambda.lower\"=\"float\", \"lambda.upper\"=\"float\",\n \"num.chains\"=\"float\")\n \n # default values\n defaults <- list(\"num.iters\"=5000, \"burn.in\"=1000, \"thin\"=2, \n \"theta.lower\"=-2, \"theta.upper\"=2,\n \"lambda.lower\"=-2, \"lambda.upper\"=2,\n \"num.chains\"=3)\n \n var.order <- c(\"num.iters\", \"burn.in\", \"thin\", \"num.chains\", \n \"theta.lower\", \"theta.upper\",\n \"lambda.lower\", \"lambda.upper\")\n parameters <- list(\"parameters\"=params, \"defaults\"=defaults, \"var_order\"=var.order)\n}\n\ndiagnostic.hsroc.pretty.names <- function() {\n pretty.names <- list(\"pretty.name\"=\"HSROC\", \n \"description\" = \"Hierarchical regression analysis of diagnostic data\\n (Rutter and Gatsonis, Statistics in Medicine, 2001).\",\n \"num.iters\"=list(\"pretty.name\"=\"Number of Iterations\", \"description\"=\"Number of iterations to run.\"),\n \"burn.in\"=list(\"pretty.name\"=\"Burn in\", \"description\"=\"Number of draws to use for convergence.\"),\n \"thin\"=list(\"pretty.name\"=\"Thin\", \"description\"=\"Thinning.\"),\n \"num.chains\"=list(\"pretty.name\"=\"Number of Chains\", \"description\"=\"Number of MCMC chains.\"),\n \"lambda.lower\"=list(\"pretty.name\"=\"prior on lambda (lower)\", \"description\"=\"Lower value in (uniform) range over expected lambda values.\"),\n \"lambda.upper\"=list(\"pretty.name\"=\"prior on lambda (upper)\", \"description\"=\"Upper value in (uniform) range over expected lambda values.\"),\n \"theta.lower\"=list(\"pretty.name\"=\"prior on theta (lower)\", \"description\"=\"Lower value in (uniform) range over expected theta values.\"),\n \"theta.upper\"=list(\"pretty.name\"=\"prior on theta (upper)\", \"description\"=\"Upper value in (uniform) range over expected theta values.\")\n )\n}\n\n\ndiagnostic.hsroc.ml.is.feasible <- function(diagnostic.data, metric){\n # only estimable when we have >= 5 studies\n length(diagnostic.data@TP) > 4\n}\n\n\n##################################\n# diagnostic biviariate #\n##################################\ndiagnostic.bivariate.ml <- function(diagnostic.data, params){\n\talpha <- 1.0-(params$conf.level/100.0)\n\tmult <- abs(qnorm(alpha/2.0))\n\t\n library(boot)\n\n adjusted.counts <- adjust.raw.data(diagnostic.data, params)\n\n biv.results <- bivariate.dx.test(adjusted.counts$TP, adjusted.counts$FP, adjusted.counts$FN, adjusted.counts$TN)\n\n \n #### \n # parse out results -- @TODO make this nicer.\n logit_sens = biv.results[1,1]\n logit_spec = biv.results[1,2]\n se_logit_sens = biv.results[1,3]\n se_logit_spec = biv.results[1,4]\n correlation = biv.results[1,7]\n\n digits = 4\n sensitivity <- round(inv.logit(logit_sens), digits)\n\t# Un-hard-coding CI.. 
issue # 214\n sens.low <- round(inv.logit(logit_sens - mult*se_logit_sens), digits)\n sens.high <- round(inv.logit(logit_sens + mult*se_logit_sens), digits)\n\n specificity <- round(inv.logit(logit_spec), digits)\n spec.low <- round(inv.logit(logit_spec - mult*se_logit_spec), digits)\n spec.high <- round(inv.logit(logit_spec + mult*se_logit_spec), digits)\n\n r <- round(biv.results$correlation, digits)\n\n report.array <- array(c(\"\", \"Sensitivity\",\"Specificity\", \"Correlation\",\n \"Estimate\", sensitivity, specificity, r,\n \"Lower bound\", sens.low, spec.low, \"\",\n \"Upper bound\", sens.high,spec.high, \"\"),\n dim=c(4,4))\n\n # this makes it pretty-print?\n class(report.array) <- \"summary.data\"\n\n\n # generate the plot\n path.to.roc.plot <- \"./r_tmp/bivariate\" # just hard-coding for now\n plot.bivariate(biv.results, adjusted.counts$TP, adjusted.counts$FP, \n adjusted.counts$FN, adjusted.counts$TN,\n filepath=path.to.roc.plot)\n\n images <- c(\"ROC Plot\"=path.to.roc.plot)\n\n\treferences <- \"this is a placeholder for bivariate references\"\n results <- list(\"images\"=images,\n\t\t\t \"Summary\"=list(\"Bivariate Summary\"=report.array),\n\t\t\t\t\t\"References\"=references)\n}\n\n\ndiagnostic.bivariate.ml.parameters <- function(){\n apply_adjustment_to = c(\"only0\", \"all\")\n\n params <- list(\"conf.level\"=\"float\", \"adjust\"=\"float\", \"to\"=apply_adjustment_to)\n\n # default values\n defaults <- list(\"conf.level\"=95, \"adjust\"=.5, \"to\"=\"only0\")\n\n var_order = c(\"conf.level\", \"adjust\", \"to\")\n\n parameters <- list(\"parameters\"=params, \"defaults\"=defaults, \"var_order\"=var_order)\n}\n\n\ndiagnostic.bivariate.ml.pretty.names <- function() {\n pretty.names <- list(\"pretty.name\"=\"Bivariate (Maximum Likelihood)\", \n \"description\" = \"Bivariate analysis of sensitivity and specificity \\n using maximum likelihood estimate.\",\n\t\t\t\t\t\t \"conf.level\"=list(\"pretty.name\"=\"Confidence level\", \"description\"=\"Level at which to compute confidence intervals\"), \n\t\t\t\t\t\t \"adjust\"=list(\"pretty.name\"=\"Correction factor\", \"description\"=\"Constant c that is added to the entries of a two-by-two table.\"),\n \"to\"=list(\"pretty.name\"=\"Add correction factor to\", \"description\"=\"When Add correction factor is set to \\\"only 0\\\", the correction factor\n is added to all cells of each two-by-two table that contains at leason one zero. 
When set to \\\"all\\\", the correction factor\n is added to all two-by-two tables if at least one table contains a zero.\")\n ) \n \n}\n\ndiagnostic.bivariate.ml.is.feasible <- function(diagnostic.data, metric){\n # only estimable when we have >= 5 studies\n length(diagnostic.data@TP) > 4\n}\n\n\n\n##################################\n# SROC Plot #\n##################################\ncreate.sroc.plot.data <- function(diagnostic.data, params){\n # create plot data for an ROC plot.\n \n # assert that the argument is the correct type\n if (!(\"DiagnosticData\" %in% class(diagnostic.data))) stop(\"Diagnostic data expected.\")\n\n # add constant to zero cells\n data.adj <- adjust.raw.data(diagnostic.data,params)\n # compute true positive ratio = sensitivity \n TPR <- data.adj$TP / (data.adj$TP + data.adj$FN)\n # compute false positive ratio = 1 - specificity\n FPR <- data.adj$FP / (data.adj$TN + data.adj$FP)\n S <- logit(TPR) + logit(FPR)\n D <- logit(TPR) - logit(FPR)\n s.range <- list(\"max\"=max(S), \"min\"=min(S))\n inv.var <- data.adj$TP + data.adj$FN + data.adj$FP + data.adj$TN\n res <- lm(D~S)\n fitted.line <- list(intercept=res$coefficients[1], slope=res$coefficients[2])\n std.err <- summary(res)$sigma\n # residual standard error\n alpha <- 1.0-(params$conf.level/100.0)\n mult <- abs(qnorm(alpha/2.0))\n # multiplier for std.err to get conf. int. bounds\n plot.options <- list()\n plot.options$roc.xlabel <- params$roc_xlabel\n plot.options$roc.ylabel <- params$roc_ylabel\n plot.options$roc.title <- params$roc_title\n # for future use as options from GUI\n plot.data <- list(\"fitted.line\" = fitted.line, \"TPR\"=TPR, \"FPR\"=FPR, \"std.err\"=std.err, \"mult\"=mult, \"inv.var\" = inv.var, \"s.range\" = s.range, \"plot.options\"=plot.options)\n}\n\n###################################################\n# create side-by-side forest.plots #\n###################################################\n\ncreate.side.by.side.plot.data <- function(diagnostic.data, params, res) { \n # creates data for two side-by-side forest plots\n params.left <- params$left\n params.right <- params$right\n #params.left$fp_show_col1 <- 'TRUE'\n #params.right$fp_show_col1 <- 'FALSE'\n # only show study names on the left plot\n res.left <- res$left\n res.right <- res$right \n diagnostic.data.left <- diagnostic.data$left\n diagnostic.data.right <- diagnostic.data$right\n \n plot.data.left <- create.plot.data.diagnostic(diagnostic.data.left, params.left, res.left)\n plot.data.left$options$fp.title <- pretty.metric.name(as.character(params.left$measure))\n \n plot.data.right <- create.plot.data.diagnostic(diagnostic.data.right, params.right, res.right)\n plot.data.right$options$fp.title <- pretty.metric.name(as.character(params.right$measure))\n \n plot.data <- list(\"left\"=plot.data.left, \"right\"=plot.data.right)\n plot.data\n}" }, { "alpha_fraction": 0.6210673451423645, "alphanum_fraction": 0.626274585723877, "avg_line_length": 44.985233306884766, "blob_id": "264f1d6ed29dd35e8733981f3ee2a1afad35faf4", "content_id": "14e80922ea85c1d469fb301c01da3ae1103fc0d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 87187, "license_type": "no_license", "max_line_length": 211, "num_lines": 1896, "path": "/src/R/openmetar/R/meta_methods.r", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "##################################################################\n# #\n# Byron C. 
Wallace #\n# Tufts Medical Center #\n# OpenMeta[analyst] #\n# --- #\n# We refer to methods that operate on estimates #\n# of subsets as `meta' methods. These include #\n# cumulative meta-analysis, leave-one-out meta-analysis #\n# and all-subsets meta-analysis. #\n# #\n# Any base meta-analytic #\n# method can be used as a basis for these methods, #\n# so long as the associated *.overall function #\n# is implemented. #\n##################################################################\n\ncum_meta_analysis_ref = 'Cumulative Meta-Analysis: Lau, Joseph, et al. \"Cumulative meta-analysis of therapeutic trials for myocardial infarction.\" New England Journal of Medicine 327.4 (1992): 248-254.)'\nsubgroup_ma_ref = \"Subgroup Meta-Analysis: subgroup ma reference placeholder\"\nloo_ma_ref = \"Leave-one-out Meta-Analysis: LOO ma reference placeholder\"\n\n##################################\n# binary cumulative MA #\n##################################\ncum.ma.binary <- function(fname, binary.data, params){\n # assert that the argument is the correct type\n if (!(\"BinaryData\" %in% class(binary.data))) stop(\"Binary data expected.\")\n \n params.tmp <- params\n # These temporarily turn off creating plots and writing results to file\n #params.tmp$create.plot <- FALSE\n #params.tmp$write.to.file <- FALSE\n\tparams.tmp$supress.output <- TRUE\n res <- eval(call(fname, binary.data, params.tmp))\n res.overall <- eval(call(paste(fname, \".overall\", sep=\"\"), res))\n # parse out the overall estimate\n plot.data <- create.plot.data.binary(binary.data, params, res.overall)\n # data for standard forest plot\n \n # iterate over the binaryData elements, adding one study at a time\n cum.results <- array(list(NULL), dim=c(length([email protected])))\n \n for (i in 1:length([email protected])){\n # build a BinaryData object including studies\n # 1 through i\n y.tmp <- binary.data@y[1:i]\n SE.tmp <- binary.data@SE[1:i]\n names.tmp <- [email protected][1:i]\n bin.data.tmp <- NULL\n if (length(binary.data@g1O1) > 0){\n # if we have group level data for \n # group 1, outcome 1, then we assume\n # we have it for all groups\n g1O1.tmp <- binary.data@g1O1[1:i]\n g1O2.tmp <- binary.data@g1O2[1:i]\n g2O1.tmp <- binary.data@g2O1[1:i]\n g2O2.tmp <- binary.data@g2O2[1:i]\n bin.data.tmp <- new('BinaryData', g1O1=g1O1.tmp, \n g1O2=g1O2.tmp , g2O1=g2O1.tmp, \n g2O2=g2O2.tmp, y=y.tmp, SE=SE.tmp, study.names=names.tmp)\n } else {\n bin.data.tmp <- new('BinaryData', y=y.tmp, SE=SE.tmp, study.names=names.tmp)\n }\n # call the parametric function by name, passing along the \n # data and parameters. 
Notice that this method knows\n # neither what method its calling nor what parameters\n # it's passing!\n cur.res <- eval(call(fname, bin.data.tmp, params.tmp))\n cur.overall <- eval(call(paste(fname, \".overall\", sep=\"\"), cur.res))\n cum.results[[i]] <- cur.overall \n }\n study.names <- [email protected][1] \n for (count in 2:length([email protected])) {\n study.names <- c(study.names, paste(\"+ \",[email protected][count], sep=\"\"))\n }\n metric.name <- pretty.metric.name(as.character(params.tmp$measure))\n\tmodel.title <- switch(fname,\n binary.fixed.inv.var=paste(\"Binary Fixed-effect Model - Inverse Variance\\n\\nMetric: \", metric.name, sep=\"\"),\n binary.fixed.mh=paste(\"Binary Fixed-effect Model - Mantel Haenszel\\n\\nMetric: \", metric.name, sep=\"\"),\n binary.fixed.peto=paste(\"Binary Fixed-effect Model - Peto\\n\\nMetric: \", metric.name, sep=\"\"),\n binary.random=paste(\"Binary Random-Effects Model\\n\\nMetric: \", metric.name, sep=\"\"))\n\tvalue.info <- switch(fname,\n binary.fixed.inv.var = cumul.rma.uni.value.info(),\n binary.fixed.mh = cumul.rma.mh.value.info(),\n binary.fixed.peto = cumul.rma.mh.value.info(),\n binary.random = cumul.rma.uni.value.info())\n cum.disp <- create.overall.display(res=cum.results, study.names, params, model.title, data.type=\"binary\")\n forest.path <- paste(params$fp_outpath, sep=\"\")\n params.cum <- params\n params.cum$fp_col1_str <- \"Cumulative Studies\"\n params.cum$fp_col2_str <- \"Cumulative Estimate\"\n # column labels for the cumulative (right-hand) plot\n plot.data.cum <- create.plot.data.cum(om.data=binary.data, params.cum, res=cum.results)\n two.plot.data <- list(\"left\"=plot.data, \"right\"=plot.data.cum)\n changed.params <- plot.data$changed.params\n # List of changed params values for standard (left) plot - not cumulative plot!\n # Currently plot edit can't handle two sets of params values for xticks or plot bounds.\n # Could be changed in future.\n params.changed.in.forest.plot <- two.forest.plots(two.plot.data, outpath=forest.path)\n changed.params <- c(changed.params, params.changed.in.forest.plot)\n # Update params changed in two.forest.plots\n params[names(changed.params)] <- changed.params\n # we use the system time as our unique-enough string to store\n # the params object\n forest.plot.params.path <- save.data(binary.data, res, params, two.plot.data)\n # Now we package the results in a dictionary (technically, a named \n # vector). In particular, there are two fields that must be returned; \n # a dictionary of images (mapping titles to image paths) and a list of texts\n # (mapping titles to pretty-printed text). In this case we have only one \n # of each. 
\n # \n plot.params.paths <- c(\"Cumulative Forest Plot\"=forest.plot.params.path) # hopefully this change (adding 'Cumulative') doesn't break OMA\n images <- c(\"Cumulative Forest Plot\"=forest.path)\n plot.names <- c(\"cumulative forest plot\"=\"cumulative_forest_plot\")\n\t\n\treferences <- c(res$References, cum_meta_analysis_ref)\n\t\n results <- list(\"images\"=images,\n\t\t\t \"Cumulative Summary\"=cum.disp, \n \"plot_names\"=plot.names, \n \"plot_params_paths\"=plot.params.paths, \n\t\t\t\t\t\"References\"=references,\n\t\t\t\t\t\"res\"=construct.sequential.res.output(cum.results, value.info, replacements=list(estimate='b')),\n\t\t\t\t\t\"res.info\"=list(summary.table=list(type=\"data.frame\", description=\"\"))\n )\n\t\t\t\t\t\n results\n}\n\nconstruct.sequential.res.output <- function(seq.res, value.info, replacements=list()) {\n\t# amalgamates outputs from analysis routines that are groups of other outputs\n\t# Decided to leave these outputs as a single table rather than breaking them up, in contrast to how value.info is handled elsewhere\n\t# replacements is a list mapping names in value.info to names in the underlying fname if they don't match\t\n\t\n\tvalue.names <- names(value.info) # all assumed to be vectors\n\tresults.table <- c()\n\t\n\tget.val<-function(x) {\n\t\tif (name %in% names(replacements))\n\t\t\tval <- x[[replacements[[name]]]]\n\t\telse\n\t\t\tval <- x[[name]]\n\t\t\n\t\tif (is.null(val))\n\t\t\tval <- NA\n\t\tval\n\t}\n\t\n\tfor (name in value.names) {\n\t\tcolumn <- unlist(sapply(seq.res,get.val))\n\t\tresults.table <- cbind(results.table, column)\n\t}\n\tresults.table <- as.data.frame(results.table)\n\tnames(results.table) <- value.names\n\t\n\tlist(summary.table=results.table)\n}\n\nconstruct.subgroup.res.output <- function(subgroups.res) {\n\t# output is a list of subgroup results\n\n\toutput = list()\n\tcount = 0\n\tfor (res in subgroups.res) {\n\t\tcount <- count + 1\n\t\toutput[[count]] <- res\n\t}\n\toutput\n}\n\nconstruct.subgroup.value.info <- function(value.info, subgroup.list) {\n\t# value.info is the value.info from the basic fname\n\t# subgroup.list is a character vector of subgroup names\n\t# Subgroup value info will be a list of single value infos, e.g.\n\t# list('Subgroup X'=list(b=list(type=\"vector\" ......), ....),\n\t# 'Subgroup Y'=list(b=list(type=\"vector\" ......), ....)\n\t# )\n\t\n\tsubgroup.value.info <- list()\n\tfor (subgroup_name in subgroup.list) {\n\t\tsubgroup.title<-paste(\"Subgroup\",subgroup_name, sep=\" \")\n\t\tsubgroup.value.info[[subgroup.title]] = value.info\n\t}\n\tsubgroup.value.info[[\"Overall\"]] = value.info\n\tsubgroup.value.info\n}\n\n\nbootstrap.binary <- function(fname, omdata, params, cond.means.data=FALSE) {\n\tbootstrap(fname, omdata, params, cond.means.data)\n}\nbootstrap.continuous <- function(fname, omdata, params, cond.means.data=FALSE) {\n\tbootstrap(fname, omdata, params, cond.means.data)\n}\n\nbootstrap <- function(fname, omdata, params, cond.means.data=FALSE) {\n\t# fname: the name of the function that runs the basic meta-analysis\n\t# omdata: the meta-analysis data object (binary or continuous) containing the data of interest\n\t# params: parameters for the meta-analysis and the bootstrap, in particular\n\t# params$num.bootstrap.replicates\n\t# params$bootstrap.type\n\t# params$bootstrap.plot.path\n\t# cond.means.data: data for bootstrapped conditional means (FALSE when not applicable)\n\t\n\t\n\trequire(boot)\n\t\n\t####omdata2 <- data.frame(omdata@y, omdata@SE, [email protected])\n\tomdata.rows <- seq(1:length(omdata@y)) # just store the row #s, we will index in 
to the actual object in the statistic function\n\t#####names(omdata2)<-c(\"y\", \"SE\", \"study.names\")\n\n\t\n\t# extract parameters\n\tconf.level <- params$conf.level\n\tmax.extra.attempts <- 5*params$num.bootstrap.replicates\n\tbootstrap.type <- as.character(params$bootstrap.type)\n\tbootstrap.plot.path <- as.character(params$bootstrap.plot.path)\n\tif (is.null(bootstrap.plot.path)) {\n\t\tbootstrap.plot.path <- \"./r_tmp/bootstrap.png\"\n\t}\n\t\n\t# used in the meta.reg.statistic to see if the covariates match\n\tif (length(omdata@covariates) > 0) {\n\t\tcov.data <- extract.cov.data(omdata, dont.make.array=TRUE)\n\t\tfactor.n.levels <- cov.data$display.data$factor.n.levels\n\t\tn.cont.covs <- cov.data$display.data$n.cont.covs\n\t\tcat.ref.var.and.levels <- cov.data$cat.ref.var.and.levels\n\t}\n\t\n\t\n\t# for bootstrapping a regular meta-analysis\n\tvanilla.statistic <- function(data, indices) {\n\t\tparams.tmp <- params\n\t\tparams.tmp$create.plot <- FALSE\n\t\tparams.tmp$write.to.file <- FALSE\n\t\t\n\t\tdata.tmp <- get.subset(omdata, indices, make.unique.names=TRUE)\n\t\t\t\t\t\t \n\t\t\n\t res <- eval(call(fname, data.tmp, params.tmp))\n\t res.pure <- eval(call(paste(fname, \".overall\", sep=\"\"), res)) # the pure object obtained from metafor (not messed around with by OpenMetaR)\n\t res.pure$b\n\t}\n\t\n\t\n\tmeta.reg.statistic <- function(data, indices) {\n\t\tdata.ok <- function(data.subset) {\n\t\t\tsubset.cov.data <- extract.cov.data(data.subset, dont.make.array=TRUE)\n\t\t\tsubset.factor.n.levels <- subset.cov.data$display.data$factor.n.levels\n\t\t\tsubset.n.cont.covs <- subset.cov.data$display.data$n.cont.covs\n\t\t\tsubset.cat.ref.var.and.levels <- subset.cov.data$cat.ref.var.and.levels\n\t\t\t\n\t\t\t# are the number of levels for each categorical covariate and the number of continuous covariates the same?\n\t\t\tif (!(all(factor.n.levels==subset.factor.n.levels) && all(n.cont.covs==subset.n.cont.covs)))\n\t\t\t\treturn(FALSE)\n\t\t\t\n\t\t\treturn(TRUE)\n\t\t}\n\t\t\n\t\tdata.tmp <- get.subset(omdata, indices, make.unique.names=TRUE)\n\t\terror.during.meta.regression <- FALSE\n\t\tfirst.try <- TRUE\n\t\twhile (first.try || !data.ok(data.tmp) || error.during.meta.regression) {\n\t\t\tif (extra.attempts >= max.extra.attempts)\n\t\t\t\tstop(\"Number of extra attempts exceeded 5x the number of replicates\")\n\t\t\t\n\t\t\t\n\t\t\tif (!first.try) {\n\t\t\t\textra.attempts <<- extra.attempts + 1\n\t\t\t\t#cat(\"attempt: \", extra.attempts, \"\\n\")\n\t\t\t\tnew.indices <- sample.int(length(omdata.rows), size=length(indices), replace=TRUE)\n\t\t\t\tdata.tmp <- get.subset(omdata, new.indices, make.unique.names=TRUE)\n\t\t\t} else {\n\t\t\t\tfirst.try <- FALSE\n\t\t\t}\n\n\t\t\tif (data.ok(data.tmp)) {\n\t\t\t\t#cat(\" data is ok maybe\")\n\t\t\t\t\n\t\t\t\t# try to run the meta.regression\n\t\t\t\tres <- try(meta.regression(data.tmp, params, stop.at.rma=TRUE), silent=FALSE)\n\t\t\t\tif (class(res)[1] == \"try-error\") {\n\t\t\t\t\terror.during.meta.regression <- TRUE\n\t\t\t\t\t#cat(\"There was an error during meta regression\\n\")\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\terror.during.meta.regression <- FALSE\n\t\t\t\t}\n\t\t\t}\n\t\t} # end while\n\t\t\n\n\t\tres$b\n\t}\n\t\n\t# generate design matrix for transform if we are doing bootstrapped conditional means\n\tif (bootstrap.type == \"boot.meta.reg.cond.means\")\n\t\ta.matrix <- generate.a.matrix(omdata, cat.ref.var.and.levels, cond.means.data)\n\tmeta.reg.cond.means.statistic <- function(data, indices) 
{\n\t\tunconditional.b <- meta.reg.statistic(data, indices)\n\t\tnew_betas <- a.matrix %*% matrix(unconditional.b, ncol=1)\n\t\tnew_betas\n\t}\n\t\n\tstatistic <- switch(bootstrap.type,\n\t\t\t\t\t\tboot.ma = vanilla.statistic,\n\t\t\t\t\t\tboot.meta.reg = meta.reg.statistic,\n\t\t\t\t\t\tboot.meta.reg.cond.means = meta.reg.cond.means.statistic)\n\textra.attempts <- 0\n\tboot.res <- boot(omdata.rows, statistic=statistic, R=params$num.bootstrap.replicates)\n\tparams$extra.attempts <- extra.attempts\n\n\tcat(\"Total extra attempts: \"); cat(extra.attempts); cat(\"\\n\")\n\t\n\n\t\n\tresults <- switch(bootstrap.type,\n\t\t\tboot.ma = boot.ma.output.results(boot.res, params, bootstrap.plot.path),\n\t\t\tboot.meta.reg = boot.meta.reg.output.results(boot.res, params, bootstrap.plot.path, cov.data),\n\t\t\tboot.meta.reg.cond.means = boot.meta.reg.cond.means.output.results(omdata, boot.res, params, bootstrap.plot.path, cov.data, cond.means.data))\n\t\n\t# For making textfile output of data\n\ttextfile.data <- construct.boot.res.and.value.info.for.results(results, boot.res, bootstrap.type)\n\tresults <- c(results, textfile.data) # res and res.info\n\t\n\tresults\n\t\n}\n\n# For making textfile output of data\nconstruct.boot.res.and.value.info.for.results <- function(results, boot.res, bootstrap.type) {\n\tsummary <- switch(bootstrap.type,\n\t\t\t\t\t boot.ma = results$Summary,\n\t\t\t\t boot.meta.reg = results$Summary,\n\t\t\t\t\t boot.meta.reg.cond.means = results[[\"Bootstrapped Meta-Regression Based Conditional Means\"]])\n\tsummary.name <- switch(bootstrap.type,\n\t\t\t boot.ma = \"Summary\",\n\t\t\t\t\t boot.meta.reg = \"Summary\",\n\t\t\t\t\t boot.meta.reg.cond.means = \"Bootstrapped Meta-Regression Based Conditional Means Summary\")\n\txlabels <- switch(bootstrap.type,\n\t\t\t\t\t boot.ma = NA,\n\t\t\t\t\t boot.meta.reg = results$gui.ignore.xlabels,\n\t\t\t\t\t boot.meta.reg.cond.means = results$gui.ignore.xlabels)\n\tres <- list()\n\tres.info <- list()\n\tres[[summary.name]] <- summary\n\tres.info[[summary.name]] <- list(type=\"blob\", description=\"\")\n\t\n\tif (isnt.na(xlabels)) {\n\t\tres[['coefficient_labels']] = xlabels\n\t\tres.info[['coefficient_labels']] = list(type=\"vector\", description=\"Coefficients in t given in the following order\")\n\t}\n\t\n\tres$t <- boot.res$t\n\tres.info$t <- list(type=\"matrix\", description=\"A matrix with #replicates rows, each of which is a bootstrap replicate\")\n\t\n\tlist(res=res,\n\t\t res.info=res.info)\n}\n\n\nboot.ma.output.results <- function(boot.results, params, bootstrap.plot.path) {\n\tconf.interval <- boot.ci(boot.out = boot.results, type = \"norm\")\n\tmean_boot <- mean(boot.results$t)\n\t\n\tconf.interval.msg <- paste(\"The \", conf.interval$norm[1]*100, \"% Confidence Interval: [\", round(conf.interval$norm[2],digits=params$digits), \", \", round(conf.interval$norm[3],digits=params$digits), \"]\", sep=\"\")\n\tmean.msg <- paste(\"The observed value of the effect size was \", round(boot.results$t0, digits=params$digits), \", while the mean over the replicates was \", round(mean_boot,digits=params$digits), \".\", sep=\"\")\n\tsummary.msg <- paste(conf.interval.msg, \"\\n\", mean.msg, sep=\"\")\n\t# Make histogram\n\tpng(file=bootstrap.plot.path)\n\tplot.custom.boot(boot.results, title=as.character(params$histogram.title), xlab=c(as.character(params$histogram.xlab)), ci.lb=conf.interval$norm[2], ci.ub=conf.interval$norm[3])\n\tgraphics.off()\n\t\n\timages <- c(\"Histogram\"=bootstrap.plot.path)\n\tplot.names <- 
c(\"histogram\"=\"histogram\")\n\tresults <- list(\"images\"=images,\n\t\t\t\"Summary\"=summary.msg)\n\tresults\n}\n\ncalc.meta.reg.coeffs.and.cis <- function(boot.results) {\n\tdim.t <- dim(boot.results$t)\n\tnum.rows <- dim.t[1]\n\tnum.coeffs <- dim.t[2]\n\t\n\tcoeffs.and.cis <- data.frame(b=c(), ci.lb=c(), ci.ub=c())\n\tfor (i in 1:num.coeffs) {\n\t\tmean_coeff <- mean(boot.results$t[,i])\n\t\tconf.interval <- boot.ci(boot.out = boot.results, type=\"norm\", index=i)\n\t\tnew.result.row <- data.frame(b=mean_coeff, ci.lb=conf.interval$norm[2], ci.ub=conf.interval$norm[3])\n\t\tcoeffs.and.cis <- rbind(coeffs.and.cis, new.result.row)\n\t}\n\tcoeffs.and.cis\n}\n\nboot.meta.reg.output.results <- function(boot.results, params, bootstrap.plot.path, cov.data) {\n\tcoeffs.and.cis <- calc.meta.reg.coeffs.and.cis(boot.results)\n\t\n\t\n\tdisplay.data <- cov.data$display.data\n\treg.disp <- create.regression.display(coeffs.and.cis, params, display.data)\n\n\t\n\t\n\t#### Get labels to label histograms with\n\tcov.display.col <- display.data$cov.display.col\n\tlevels.display.col <- display.data$levels.display.col\n\tfactor.n.levels <- display.data$factor.n.levels\n\t\n\tnon.empty.levels.labels <- levels.display.col[levels.display.col!=\"\"]\n\twanted.cov.display.col.labels <- cov.display.col[1:(length(cov.display.col)-length(non.empty.levels.labels))]\n\tfactor.index <- 0\n\tfor (n.level in factor.n.levels) {\n\t\t# replace unwanted entry with \"\"\n\t\tnon.empty.levels.labels[(factor.index+1)] <- \"\"\n\t\tfactor.index <- factor.index + n.level\n\t}\n\t# remove \"\"\n\tnon.empty.levels.labels <- non.empty.levels.labels[non.empty.levels.labels!=\"\"]\n\t#### end of get labels to to label histograms with\n\t\n\txlabels <- c(wanted.cov.display.col.labels,non.empty.levels.labels)\n\txlabels.clean <- xlabels\n\txlabels <- paste(xlabels, \"Coefficient\")\n\t\n\t# Make histograms\n\tpng(file=bootstrap.plot.path, width = 480, height = 480*length(xlabels))\n\tplot.custom.boot(boot.results,\n\t\t\t\t\t title=as.character(params$histogram.title),\n\t\t\t\t\t xlabs=xlabels,\n\t\t\t\t\t ci.lb=coeffs.and.cis$ci.lb,\n\t\t\t\t\t ci.ub=coeffs.and.cis$ci.ub)\n\tgraphics.off()\n\n\timages <- c(\"Histograms\"=bootstrap.plot.path)\n\tplot.names <- c(\"histograms\"=\"histograms\")\n\toutput.results <- list(\"images\"=images,\n\t\t\t\t\t\t \"Summary\"=reg.disp,\n\t\t\t\t\t\t \"gui.ignore.xlabels\"=xlabels.clean)\n\toutput.results\n}\n\nboot.meta.reg.cond.means.output.results <- function(omdata, boot.results, params, bootstrap.plot.path, cov.data, cond.means.data) {\n\tcoeffs.and.cis <- calc.meta.reg.coeffs.and.cis(boot.results)\n\tcat.ref.var.and.levels <- cov.data$cat.ref.var.and.levels\n\tchosen.cov.name = as.character(cond.means.data$chosen.cov.name)\n\t\n\tboot.cond.means.disp <- boot.cond.means.display(omdata, coeffs.and.cis, params, cat.ref.var.and.levels, cond.means.data)\n\n\t# Make histograms\n\txlabels <- cat.ref.var.and.levels[[chosen.cov.name]]\n\txlabels.clean <- xlabels\n\txlabels <- paste(\"Conditional Mean of\", xlabels)\n\t\n\tpng(file=bootstrap.plot.path, width = 480, height = 480*length(xlabels))\n\tplot.custom.boot(boot.results,\n\t\t\ttitle=as.character(params$histogram.title),\n\t\t\txlabs=xlabels,\n\t\t\tci.lb=coeffs.and.cis$ci.lb,\n\t\t\tci.ub=coeffs.and.cis$ci.ub)\n\tgraphics.off()\n\t\n\timages <- c(\"Histograms\"=bootstrap.plot.path)\n\tplot.names <- c(\"histograms\"=\"histograms\")\n\toutput.results <- list(\"images\"=images,\n\t\t\t\t\t\t \"Bootstrapped Meta-Regression Based Conditional 
Means\"=boot.cond.means.disp,\n\t\t\t\t\t\t \"gui.ignore.xlabels\"=xlabels.clean)\n\toutput.results\n}\n\nplot.custom.boot <- function(boot.out, title=\"Bootstrap Histogram\", ci.lb, ci.ub, xlabs=c(\"Effect Size\")) {\n#\n# Plots the Histogram \n#\n\t\n\tconst <- function(w, eps=1e-8) {\n\t# Are all of the values of w equal to within the tolerance eps.\n\t\tall(abs(w-mean(w, na.rm=TRUE)) < eps)\n\t}\n\tnum.hists <- length(xlabs)\n\tpar(mfcol=c(num.hists,1))\n\tfor (index in 1:num.hists) {\n\t\tqdist <- \"norm\"\n\t\tt <- boot.out$t[,index]\n\t\tt0 <- boot.out$t0[index]\n\t\tt <- t[is.finite(t)]\n\t\tif (const(t, min(1e-8,mean(t, na.rm=TRUE)/1e6))) {\n\t\t\tprint(paste(\"All values of t* are equal to \", mean(t, na.rm=TRUE)))\n\t\t\treturn(invisible(boot.out))\n\t\t}\n\t\tnclass <- min(max(ceiling(length(t)/25),10),100)\n\t\tR <- boot.out$R\n\t\t\n\t\thist(t,nclass=nclass,probability=TRUE,xlab=xlabs[index], main=title)\n\t\tabline(v=t0,lty=1)\n\t\tabline(v=ci.lb[index],lty=3) # conf. interval lines\n\t\tabline(v=ci.ub[index],lty=3)\n\t}\n}\n\n##################################\n# binary leave-one-out MA #\n##################################\nloo.ma.binary <- function(fname, binary.data, params){\n # assert that the argument is the correct type\n if (!(\"BinaryData\" %in% class(binary.data))) stop(\"Binary data expected.\")\n \n\t######## START REFACTOR HERE ############\n loo.results <- array(list(NULL), dim=c(length([email protected])))\n params.tmp <- params\n \n #params.tmp$create.plot <- FALSE\n #params.tmp$write.to.file <- FALSE\n\tparams.tmp$supress.output <- TRUE\n # don't create plots when calling individual binary methods\n res <- eval(call(fname, binary.data, params.tmp))\n res.overall <- eval(call(paste(fname, \".overall\", sep=\"\"), res))\n N <- length([email protected])\n for (i in 1:N){\n # get a list of indices, i.e., the subset\n # that is 1:N with i left out\n index.ls <- setdiff(1:N, i)\n \n # build a BinaryData object with the \n # ith study removed. \n y.tmp <- binary.data@y[index.ls]\n SE.tmp <- binary.data@SE[index.ls]\n names.tmp <- [email protected][index.ls]\n bin.data.tmp <- NULL\n \n if (length(binary.data@g1O1) > 0){\n # if we have group level data for \n # group 1, outcome 1, then we assume\n # we have it for all groups\n g1O1.tmp <- binary.data@g1O1[index.ls]\n g1O2.tmp <- binary.data@g1O2[index.ls]\n g2O1.tmp <- binary.data@g2O1[index.ls]\n g2O2.tmp <- binary.data@g2O2[index.ls]\n bin.data.tmp <- new('BinaryData', g1O1=g1O1.tmp, \n g1O2=g1O2.tmp , g2O1=g2O1.tmp, \n g2O2=g2O2.tmp, y=y.tmp, SE=SE.tmp, study.names=names.tmp)\n } else{\n bin.data.tmp <- new('BinaryData', y=y.tmp, SE=SE.tmp, study.names=names.tmp)\n }\n # call the parametric function by name, passing along the \n # data and parameters. 
Notice that this method knows\n # neither what method its calling nor what parameters\n # it's passing!\n cur.res <- eval(call(fname, bin.data.tmp, params.tmp))\n cur.overall <- eval(call(paste(fname, \".overall\", sep=\"\"), cur.res))\n loo.results[[i]] <- cur.overall\n }\n loo.results <- c(list(res.overall), loo.results)\n\t\n\t#### END REFACTORING HERE ##################\n\t\n\t\n # Add overall results\n study.names <- c(\"Overall\", paste(\"- \",[email protected], sep=\"\"))\n metric.name <- pretty.metric.name(as.character(params$measure))\n\tmodel.title <- switch(fname,\n\t\t\tbinary.fixed.inv.var = paste(\"Binary Fixed-effect Model - Inverse Variance\\n\\nMetric: \", metric.name, sep=\"\"),\n\t\t\tbinary.fixed.mh = paste(\"Binary Fixed-effect Model - Mantel Haenszel\\n\\nMetric: \", metric.name, sep=\"\"),\n\t\t\tbinary.fixed.peto = paste(\"Binary Fixed-effect Model - Peto\\n\\nMetric: \", metric.name, sep=\"\"),\n\t\t\tbinary.random = paste(\"Binary Random-Effects Model\\n\\nMetric: \", metric.name, sep=\"\"))\n\tvalue.info <- switch(fname,\n\t\t\tbinary.fixed.inv.var = loo.rma.uni.value.info(),\n\t\t\tbinary.fixed.mh = loo.rma.mh.value.info(),\n\t\t\tbinary.fixed.peto = loo.rma.mh.value.info(),\n\t\t\tbinary.random = loo.rma.uni.value.info())\n\tloo.disp <- create.overall.display(res=loo.results, study.names, params, model.title, data.type=\"binary\")\n forest.path <- paste(params$fp_outpath, sep=\"\")\n plot.data <- create.plot.data.loo(binary.data, params, res=loo.results)\n changed.params <- plot.data$changed.params\n # list of changed params values\n params.changed.in.forest.plot <- forest.plot(forest.data=plot.data, outpath=forest.path)\n changed.params <- c(changed.params, params.changed.in.forest.plot)\n params[names(changed.params)] <- changed.params\n # update params values\n # we use the system time as our unique-enough string to store\n # the params object\n forest.plot.params.path <- save.data(binary.data, res=loo.results, params, plot.data)\n #\n # Now we package the results in a dictionary (technically, a named \n # vector). In particular, there are two fields that must be returned; \n # a dictionary of images (mapping titles to image paths) and a list of texts\n # (mapping titles to pretty-printed text). In this case we have only one \n # of each. 
\n # \n plot.params.paths <- c(\"Leave-one-out Forest Plot\"=forest.plot.params.path)\n images <- c(\"Leave-one-out Forest Plot\"=forest.path)\n plot.names <- c(\"loo forest plot\"=\"loo_forest_plot\")\n\treferences <- c(res$References, loo_ma_ref)\n results <- list(\"images\"=images,\n\t\t\t \"Leave-one-out Summary\"=loo.disp, \n \"plot_names\"=plot.names, \n \"plot_params_paths\"=plot.params.paths,\n\t\t\t\t\t\"References\"=references,\n\t\t\t\t\t\"res\"=construct.sequential.res.output(loo.results,\n\t\t\t\t\t\t value.info,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t replacements=list(estimate='b',\n\t\t\t\t\t\t\t\t\t Q='QE',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tQp='QEp')),\n\t\t\t\t\t\"res.info\"=list(summary.table=list(type=\"data.frame\", description=\"\"))\n\t\t\t)\n results\n}\n\n##################################\n# continuous cumulative MA #\n##################################\ncum.ma.continuous <- function(fname, cont.data, params){\n # assert that the argument is the correct type\n if (!(\"ContinuousData\" %in% class(cont.data))) stop(\"Continuous data expected.\")\n \n params.tmp <- params\n #params.tmp$create.plot <- FALSE\n #params.tmp$write.to.file <- FALSE\n\tparams.tmp$supress.output <- TRUE\n res <- eval(call(fname, cont.data, params.tmp))\n res.overall <- eval(call(paste(fname, \".overall\", sep=\"\"), res))\n # parse out the overall estimate\n plot.data <- create.plot.data.continuous(cont.data, params, res=res.overall)\n # data for standard forest plot\n \n params$fp_show_col3 <- FALSE\n params$fp_show_col4 <- FALSE\n # cumulative plot does not display raw data\n params$fp_col1_str <- \"Cumulative Studies\"\n \n # iterate over the continuousData elements, adding one study at a time\n cum.results <- array(list(NULL), dim=c(length([email protected])))\n \n for (i in 1:length([email protected])){\n # build a ContinuousData object including studies\n # 1 through i\n y.tmp <- cont.data@y[1:i]\n SE.tmp <- cont.data@SE[1:i]\n names.tmp <- [email protected][1:i]\n cont.data.tmp <- NULL\n if (length(cont.data@N1) > 0){\n # if we have group level data for \n # group 1, outcome 1, then we assume\n # we have it for all groups\n N1.tmp <- cont.data@N1[1:i]\n mean1.tmp <- cont.data@mean1[1:i]\n sd1.tmp <- cont.data@sd1[1:i]\n N2.tmp <- cont.data@N2[1:i]\n mean2.tmp <- cont.data@mean2[1:i]\n sd2.tmp <- cont.data@sd2[1:i]\n cont.data.tmp <- new('ContinuousData', \n N1=N1.tmp, mean1=mean1.tmp , sd1=sd1.tmp, \n N2=N2.tmp, mean2=mean2.tmp, sd2=sd2.tmp,\n y=y.tmp, SE=SE.tmp, \n study.names=names.tmp)\n }\n else{\n cont.data.tmp <- new('ContinuousData', \n y=y.tmp, SE=SE.tmp, \n study.names=names.tmp)\n }\n # call the parametric function by name, passing along the \n # data and parameters. 
Notice that this method knows\n # neither what method its calling nor what parameters\n # it's passing!\n cur.res <- eval(call(fname, cont.data.tmp, params.tmp))\n cur.overall <- eval(call(paste(fname, \".overall\", sep=\"\"), cur.res))\n cum.results[[i]] <- cur.overall\n }\n study.names <- c()\n study.names <- [email protected][1] \n for (count in 2:length([email protected])) {\n study.names <- c(study.names, paste(\"+ \",[email protected][count], sep=\"\"))\n }\n \n metric.name <- pretty.metric.name(as.character(params$measure))\n\tmodel.title <- switch(fname,\n continuous.fixed = paste(\"Continuous Fixed-effect Model - Inverse Variance\\n\\nMetric: \", metric.name, sep=\"\"),\n\t\t\t\t\t\t continuous.random = paste(\"Continuous Random-Effects Model\\n\\nMetric: \", metric.name, sep=\"\"))\n value.info <- switch(fname,\n\t\t\t\t\t\t continuous.fixed = cumul.rma.uni.value.info(), \n\t continuous.random = cumul.rma.uni.value.info())\n\t\n\tcum.disp <- create.overall.display(res=cum.results, study.names, params, model.title, data.type=\"continuous\")\n forest.path <- paste(params$fp_outpath, sep=\"\")\n params.cum <- params\n params.cum$fp_col1_str <- \"Cumulative Studies\"\n params.cum$fp_col2_str <- \"Cumulative Estimate\"\n plot.data.cum <- create.plot.data.cum(om.data=cont.data, params.cum, res=cum.results)\n two.plot.data <- list(\"left\"=plot.data, \"right\"=plot.data.cum)\n changed.params <- plot.data$changed.params\n # List of changed params values for standard (left) plot - not cumulative plot!\n # Currently plot edit can't handle two sets of params values for xticks or plot bounds.\n # Could be changed in future.\n params.changed.in.forest.plot <- two.forest.plots(two.plot.data, outpath=forest.path)\n changed.params <- c(changed.params, params.changed.in.forest.plot)\n # Update params changed in two.forest.plots\n params[names(changed.params)] <- changed.params\n # we use the system time as our unique-enough string to store\n # the params object\n forest.plot.params.path <- save.data(cont.data, res=cum.results, params, two.plot.data)\n #\n # Now we package the results in a dictionary (technically, a named \n # vector). In particular, there are two fields that must be returned; \n # a dictionary of images (mapping titles to image paths) and a list of texts\n # (mapping titles to pretty-printed text). In this case we have only one \n # of each. 
\n # \n plot.params.paths <- c(\"Cumulative Forest Plot\"=forest.plot.params.path)\n images <- c(\"Cumulative Forest Plot\"=forest.path)\n plot.names <- c(\"cumulative forest plot\"=\"cumulative forest_plot\")\n\t\n\treferences <- c(res$References, cum_meta_analysis_ref)\n results <- list(\"images\"=images,\n\t\t\t \"Cumulative Summary\"=cum.disp, \n \"plot_names\"=plot.names, \n \"plot_params_paths\"=plot.params.paths,\n\t\t\t\t\t\"References\"=references,\n\t\t\t\t\t\"res\"=construct.sequential.res.output(cum.results, value.info, replacements=list(estimate='b')),\n\t\t\t\t\t\"res.info\"=list(summary.table=list(type=\"data.frame\", description=\"\"))\n\t\t\t)\n results\n}\n\n\n#################################\n# diagnostic cumulative MA #\n#################################\ncum.ma.diagnostic <- function(fname, diagnostic.data, params){\n\t# assert that the argument is the correct type\n\tif (!(\"DiagnosticData\" %in% class(diagnostic.data))) stop(\"Diagnostic data expected.\") \n\t\n\tparams.tmp <- params\n\t# These temporarily turn off creating plots and writing results to file\n\tparams.tmp$create.plot <- FALSE\n\tparams.tmp$write.to.file <- FALSE\n\tres <- eval(call(fname, diagnostic.data, params.tmp))\n\tres.overall <- eval(call(paste(fname, \".overall\", sep=\"\"), res))\n\t# parse out the overall estimate\n\tplot.data <- create.plot.data.diagnostic(diagnostic.data, params, res.overall)\n\t# data for standard forest plot\n\t\n\t####\n\t#### SOMETHING MISSING HERE?\n\t####\n\t\n\t# iterate over the binaryData elements, adding one study at a time\n\tcum.results <- array(list(NULL), dim=c(length([email protected])))\n\t\n\tfor (i in 1:length([email protected])){\n\t\t# build a DiagnosticData object including studies\n\t\t# 1 through i\n\t\ty.tmp <- diagnostic.data@y[1:i]\n\t\tSE.tmp <- diagnostic.data@SE[1:i]\n\t\tnames.tmp <- [email protected][1:i]\n\t\tbin.data.tmp <- NULL\n\t\t\n\t\tif (length(diagnostic.data@TP) > 0){\n\t\t\t# if we have group level data for \n\t\t\t# group 1, outcome 1, then we assume\n\t\t\t# we have it for all groups\n\t\t\tTP.tmp <- diagnostic.data@TP[1:i]\n\t\t\tFN.tmp <- diagnostic.data@FN[1:i]\n\t\t\tFP.tmp <- diagnostic.data@FP[1:i]\n\t\t\tTN.tmp <- diagnostic.data@TN[1:i]\n\t\t\tdiag.data.tmp <- new('DiagnosticData', TP=TP.tmp, \n\t\t\t\t\tFN=FN.tmp , FP=FP.tmp, \n\t\t\t\t\tTN=TN.tmp, y=y.tmp, SE=SE.tmp, study.names=names.tmp)\n\t\t} else {\n\t\t\tdiag.data.tmp <- new('DiagnosticData', y=y.tmp, SE=SE.tmp, study.names=names.tmp)\n\t\t}\n\t\t# call the parametric function by name, passing along the \n\t\t# data and parameters. 
Notice that this method knows\n\t\t# neither what method its calling nor what parameters\n\t\t# it's passing!\n\t\tcur.res <- eval(call(fname, diag.data.tmp, params.tmp))\n\t\tcur.overall <- eval(call(paste(fname, \".overall\", sep=\"\"), cur.res))\n\t\tcum.results[[i]] <- cur.overall \n\t}\n\tstudy.names <- [email protected][1] \n\tfor (count in 2:length([email protected])) {\n\t\tstudy.names <- c(study.names, paste(\"+ \", [email protected][count], sep=\"\"))\n\t}\n\tmetric.name <- pretty.metric.name(as.character(params.tmp$measure))\n\tmodel.title <- switch(fname,\n diagnostic.bivariate.ml = paste(\"Diagnostic Bivariate - Maximum Likelihood\\n\\nMetric: \", metric.name, sep=\"\"),\n diagnostic.fixed.inv.var = paste(\"Diagnostic Fixed-Effect Inverse Variance\\n\\nMetric: \", metric.name, sep=\"\"),\n diagnostic.fixed.mh = paste(\"Diagnostic Fixed-Effect Mantel Haenszel\\n\\nMetric: \", metric.name, sep=\"\"),\n diagnostic.fixed.peto = paste(\"Diagnostic Fixed-Effect Peto\\n\\nMetric: \", metric.name, sep=\"\"),\n diagnostic.hsroc = paste(\"Diagnostic HSROC\\n\\nMetric: \", metric.name, sep=\"\"),\n diagnostic.random = paste(\"Diagnostic Random-Effects\\n\\nMetric: \", metric.name, sep=\"\"))\n\tcum.disp <- create.overall.display(res=cum.results, study.names, params, model.title, data.type=\"diagnostic\")\n\tforest.path <- paste(params$fp_outpath, sep=\"\")\n\tparams.cum <- params\n\tparams.cum$fp_col1_str <- \"Cumulative Studies\"\n\tparams.cum$fp_col2_str <- \"Cumulative Estimate\"\n\t# column labels for the cumulative (right-hand) plot\n\tplot.data.cum <- create.plot.data.cum(om.data=diagnostic.data, params.cum, res=cum.results)\n\ttwo.plot.data <- list(\"left\"=plot.data, \"right\"=plot.data.cum)\n\tchanged.params <- plot.data$changed.params\n\t# List of changed params values for standard (left) plot - not cumulative plot!\n\t# Currently plot edit can't handle two sets of params values for xticks or plot bounds.\n\t# Could be changed in future.\n\tparams.changed.in.forest.plot <- two.forest.plots(two.plot.data, outpath=forest.path)\n\tchanged.params <- c(changed.params, params.changed.in.forest.plot)\n\t# Update params changed in two.forest.plots\n\tparams[names(changed.params)] <- changed.params\n\t# we use the system time as our unique-enough string to store\n\t# the params object\n\tforest.plot.params.path <- save.data(diagnostic.data, res, params, two.plot.data)\n\t# Now we package the results in a dictionary (technically, a named \n\t# vector). In particular, there are two fields that must be returned; \n\t# a dictionary of images (mapping titles to image paths) and a list of texts\n\t# (mapping titles to pretty-printed text). In this case we have only one \n\t# of each. 
\n\t# \n\tplot.params.paths <- c(\"Cumulative Forest Plot\"=forest.plot.params.path)\n\timages <- c(\"Cumulative Forest Plot\"=forest.path)\n\tplot.names <- c(\"cumulative forest plot\"=\"cumulative_forest_plot\")\n\t\n\treferences <- c(res$References, cum_meta_analysis_ref)\n\t\n\tresults <- list(\"images\"=images,\n\t\t\t \"Cumulative Summary\"=cum.disp,\n\t\t\t \"plot_names\"=plot.names, \n\t\t\t\t\t\"plot_params_paths\"=plot.params.paths,\n\t\t\t\t\t\"References\"=references)\n\tresults\n}\n\n\nmultiple.cum.ma.diagnostic <- function(fnames, params.list, diagnostic.data) {\n\t# wrapper for applying cum.ma method to multiple diagnostic functions and metrics \n\t\n\t# fnames -- names of diagnostic meta-analytic functions to call\n\t# params.list -- parameter lists to be passed along to the functions in\n\t# fnames\n\t# diagnostic.data -- the (diagnostic data) that is to be analyzed \n\t\n\t\n\tresults <- list()\n\tpretty.names <- diagnostic.fixed.inv.var.pretty.names()\n\timages <- c()\n\tplot.names <- c()\n\tplot.params.paths <- c()\n\t\n\treferences <- c()\n\t\t\t\n\tfor (count in 1:length(params.list)) {\n\t\tparams <- params.list[[count]]\n\t\tfname <- fnames[count]\n\t\tdiagnostic.data <- compute.diag.point.estimates(diagnostic.data, params)\n\t\tres <- cum.ma.diagnostic(fname, diagnostic.data, params)\n\t\t\n\t\tsummary <- list(\"Summary\"=res[[\"Cumulative Summary\"]])\n\t\tnames(summary) <- paste(eval(parse(text=paste(\"pretty.names$measure$\", params$measure,sep=\"\"))), \" Summary\", sep=\"\")\n\t\t\n\t\tresults <- c(results, summary)\n\t\t\n\t\timage.name <- paste(params$measure, \"Forest Plot\", sep=\" \")\n\t\timages.tmp <- c(res$images[[1]])\n\t\tnames(images.tmp) <- image.name\n\t\timages <- c(images, images.tmp)\n\t\t\n\t\tplot.names.tmp <- c(\"forest plot\"=\"forest.plot\")\n\t\tplot.names <- c(plot.names, plot.names.tmp)\n\t\t\n\t\t#plot.params.paths <-\n\t\t\n\t\treferences <- c(references, res$References)\n\t\n\t}\n\t\n\tresults <- c(results, list(\"images\"=images,\n\t\t\t\t\t \"plot_names\"=plot.names,\n\t\t\t\t\t\t\t \"References\"=references))\n\tresults\n\t\n\n\n}\n\n##################################\n# continuous leave-one-out MA #\n##################################\nloo.ma.continuous <- function(fname, cont.data, params){\n # assert that the argument is the correct type\n if (!(\"ContinuousData\" %in% class(cont.data))) stop(\"Continuous data expected.\")\n \n loo.results <- array(list(NULL), dim=c(length([email protected])))\n params.tmp <- params\n #params.tmp$create.plot <- FALSE\n #params.tmp$write.to.file <- FALSE\n\tparams.tmp$supress.output <- TRUE\n res <- eval(call(fname, cont.data, params.tmp))\n res.overall <- eval(call(paste(fname, \".overall\", sep=\"\"), res))\n N <- length([email protected])\n for (i in 1:N){\n # get a list of indices, i.e., the subset\n # that is 1:N with i left out\n index.ls <- setdiff(1:N, i)\n \n # build a ContinuousData object with the \n # ith study removed. \n y.tmp <- cont.data@y[index.ls]\n SE.tmp <- cont.data@SE[index.ls]\n names.tmp <- [email protected][index.ls]\n bin.data.tmp <- NULL\n \n # build a BinaryData object with the \n # ith study removed. 
\n y.tmp <- cont.data@y[index.ls]\n SE.tmp <- cont.data@SE[index.ls]\n names.tmp <- [email protected][index.ls]\n cont.data.tmp <- NULL\n \n if (length(cont.data@N1) > 0){\n # if we have group level data for \n # group 1, outcome 1, then we assume\n # we have it for all groups\n N1.tmp <- cont.data@N1[index.ls]\n mean1.tmp <- cont.data@mean1[index.ls]\n sd1.tmp <- cont.data@sd1[index.ls]\n N2.tmp <- cont.data@N2[index.ls]\n mean2.tmp <- cont.data@mean2[index.ls]\n sd2.tmp <- cont.data@sd2[index.ls]\n cont.data.tmp <- new('ContinuousData', \n N1=N1.tmp, mean1=mean1.tmp , sd1=sd1.tmp, \n N2=N2.tmp, mean2=mean2.tmp, sd2=sd2.tmp,\n y=y.tmp, SE=SE.tmp, \n study.names=names.tmp)\n }\n else{\n cont.data.tmp <- new('ContinuousData', \n y=y.tmp, SE=SE.tmp, \n study.names=names.tmp)\n }\n # call the parametric function by name, passing along the \n # data and parameters. Notice that this method knows\n # neither what method its calling nor what parameters\n # it's passing!\n cur.res <- eval(call(fname, cont.data.tmp, params.tmp))\n cur.overall <- eval(call(paste(fname, \".overall\", sep=\"\"), cur.res))\n loo.results[[i]] <- cur.overall\n }\n loo.results <- c(list(res.overall), loo.results)\n # Add overall results\n study.names <- c(\"Overall\", paste(\"- \", [email protected], sep=\"\"))\n params$data.type <- \"continuous\"\n metric.name <- pretty.metric.name(as.character(params$measure))\n\tmodel.title <- switch(fname,\n\t\t\tcontinuous.fixed=paste(\"Continuous Fixed-effect Model - Inverse Variance\\n\\nMetric: \", metric.name, sep=\"\"),\n\t\t\tcontinuous.random=paste(\"Continuous Random-Effects Model\\n\\nMetric: \", metric.name, sep=\"\"))\n\tvalue.info <- switch(fname,\n\t\t\tcontinuous.fixed = loo.rma.uni.value.info(), \n\t\t\tcontinuous.random = loo.rma.uni.value.info())\n\tloo.disp <- create.overall.display(res=loo.results, study.names, params, model.title, data.type=\"continuous\")\n forest.path <- paste(params$fp_outpath, sep=\"\")\n plot.data <- create.plot.data.loo(cont.data, params, res=loo.results)\n changed.params <- plot.data$changed.params\n # list of changed params values\n params.changed.in.forest.plot <- forest.plot(forest.data=plot.data, outpath=forest.path)\n changed.params <- c(changed.params, params.changed.in.forest.plot)\n params[names(changed.params)] <- changed.params\n # update params values\n # we use the system time as our unique-enough string to store\n # the params object\n forest.plot.params.path <- save.data(cont.data, res=loo.results, params, plot.data)\n #\n # Now we package the results in a dictionary (technically, a named \n # vector). In particular, there are two fields that must be returned; \n # a dictionary of images (mapping titles to image paths) and a list of texts\n # (mapping titles to pretty-printed text). In this case we have only one \n # of each. 
\n # \n plot.params.paths <- c(\"Leave-one-out Forest Plot\"=forest.plot.params.path)\n images <- c(\"Leave-one-out Forest Plot\"=forest.path)\n plot.names <- c(\"loo forest plot\"=\"loo_forest_plot\")\n\treferences <- c(res$References, loo_ma_ref)\n results <- list(\"images\"=images,\n\t\t\t \"Leave-one-out Summary\"=loo.disp, \n \"plot_names\"=plot.names, \n \"plot_params_paths\"=plot.params.paths,\n\t\t\t\t\t\"References\"=references,\n\t\t\t\t\t\"res\"=construct.sequential.res.output(loo.results,\n\t\t\t\t\t\t\tvalue.info,\n\t\t\t\t\t\t\treplacements=list(estimate='b',\n\t\t\t\t\t\t\t\t\tQ='QE',\n\t\t\t\t\t\t\t\t\tQp='QEp')),\n\t\t\t\t\t\"res.info\"=list(summary.table=list(type=\"data.frame\", description=\"\"))\n\t\t\t)\n results\n}\n\n########################\n# binary subgroup MA #\n########################\nsubgroup.ma.binary <- function(fname, binary.data, params){\n # assert that the argument is the correct type\n if (!(\"BinaryData\" %in% class(binary.data))) stop(\"Binary data expected.\")\n cov.name <- as.character(params$cov_name)\n selected.cov <- get.cov(binary.data, cov.name)\n cov.vals <- [email protected]\n params.tmp <- params\n #params.tmp$create.plot <- FALSE\n #params.tmp$write.to.file <- FALSE\n\tparams.tmp$supress.output <- TRUE\n subgroup.list <- unique(cov.vals)\n grouped.data <- array(list(NULL),c(length(subgroup.list)+1))\n subgroup.results <- array(list(NULL), c(length(subgroup.list)+1))\n col3.nums <- NULL\n col3.denoms <- NULL\n col4.nums <- NULL\n col4.denoms <- NULL\n count <- 1\n for (i in subgroup.list){\n # build a BinaryData object for each subgroup\n bin.data.tmp <- get.subgroup.data.binary(binary.data, i, cov.vals)\n grouped.data[[count]] <- bin.data.tmp\n # collect raw data columns\n col3.nums <- c(col3.nums, bin.data.tmp@g1O1, sum(bin.data.tmp@g1O1)) \n col3.denoms <- c(col3.denoms, bin.data.tmp@g1O1 + bin.data.tmp@g1O2, sum(bin.data.tmp@g1O1 + bin.data.tmp@g1O2)) \n col4.nums <- c(col4.nums, bin.data.tmp@g2O1, sum(bin.data.tmp@g2O1)) \n col4.denoms <- c(col4.denoms, bin.data.tmp@g2O1 + bin.data.tmp@g2O2, sum(bin.data.tmp@g2O1 + bin.data.tmp@g2O2)) \n cur.res <- eval(call(fname, bin.data.tmp, params.tmp))\n cur.overall <- eval(call(paste(fname, \".overall\", sep=\"\"), cur.res))\n subgroup.results[[count]] <- cur.overall\n count <- count + 1\n }\n res <- eval(call(fname, binary.data, params.tmp))\n res.overall <- eval(call(paste(fname, \".overall\", sep=\"\"), res))\n grouped.data[[count]] <- binary.data\n subgroup.results[[count]] <- res.overall\n subgroup.names <- paste(\"Subgroup \", subgroup.list, sep=\"\")\n subgroup.names <- c(subgroup.names, \"Overall\")\n metric.name <- pretty.metric.name(as.character(params$measure))\n\tmodel.title <- switch(fname,\n\t\tbinary.fixed.inv.var = paste(\"Binary Fixed-effect Model - Inverse Variance\\n\\nMetric: \", metric.name, sep=\"\"),\n\t\tbinary.fixed.mh = paste(\"Binary Fixed-effect Model - Mantel Haenszel\\n\\nMetric: \", metric.name, sep=\"\"),\n\t\tbinary.fixed.peto = paste(\"Binary Fixed-effect Model - Peto\\n\\nMetric: \", metric.name, sep=\"\"),\n\t\tbinary.random = paste(\"Binary Random-Effects Model\\n\\nMetric: \", metric.name, sep=\"\"))\n\tvalue.info <- switch(fname,\n\t\tbinary.fixed.inv.var = binary.fixed.inv.var.value.info(),\n\t\tbinary.fixed.mh\t = binary.fixed.mh.value.info(),\n\t\tbinary.fixed.peto\t = binary.fixed.peto.value.info(),\n\t\tbinary.random\t = binary.random.value.info())\n subgroup.disp <- create.subgroup.display(subgroup.results, subgroup.names, params, model.title, 
data.type=\"binary\")\n forest.path <- paste(params$fp_outpath, sep=\"\")\n # pack up the data for forest plot.\n subgroup.data <- list(\"subgroup.list\"=subgroup.list, \"grouped.data\"=grouped.data, \"results\"=subgroup.results, \n \"col3.nums\"=col3.nums, \"col3.denoms\"=col3.denoms, \"col4.nums\"=col4.nums, \"col4.denoms\"=col4.denoms)\n plot.data <- create.subgroup.plot.data.binary(subgroup.data, params)\n changed.params <- plot.data$changed.params\n # list of changed params values\n params.changed.in.forest.plot <- forest.plot(forest.data=plot.data, outpath=forest.path)\n changed.params <- c(changed.params, params.changed.in.forest.plot)\n params[names(changed.params)] <- changed.params\n # update params values\n # we use the system time as our unique-enough string to store\n # the params object\n forest.plot.params.path <- save.data(binary.data, res, params, plot.data)\n # Now we package the results in a dictionary (technically, a named \n # vector). In particular, there are two fields that must be returned; \n # a dictionary of images (mapping titles to image paths) and a list of texts\n # (mapping titles to pretty-printed text). In this case we have only one \n # of each. \n # \n plot.params.paths <- c(\"Subgroup Forest Plot\"=forest.plot.params.path)\n images <- c(\"Subgroup Forest Plot\"=forest.path)\n plot.names <- c(\"subgroups forest plot\"=\"subgroups_forest_plot\")\n\treferences <- c(res$References, subgroup_ma_ref)\n results <- list(\"images\"=images,\n\t\t\t \"Subgroup Summary\"=subgroup.disp, \n \"plot_names\"=plot.names, \n \"plot_params_paths\"=plot.params.paths,\n\t\t\t\t\t\"References\"=references,\n\t\t\t\t\t\"res\" = construct.subgroup.res.output(subgroup.results),\n\t\t\t\t\t\"res.info\" = construct.subgroup.value.info(value.info, subgroup.list))\n results\n}\n\nget.subgroup.data.binary <- function(binary.data, cov.val, cov.vals) {\n # returns the subgroup data corresponding to a categorical covariant \n # for value cov.val\n if (!(\"BinaryData\" %in% class(binary.data))) stop(\"Binary data expected.\")\n y.tmp <- binary.data@y[cov.vals == cov.val]\n SE.tmp <- binary.data@SE[cov.vals == cov.val]\n names.tmp <- [email protected][cov.vals == cov.val]\n if (length(binary.data@g1O1) > 0){\n g1O1.tmp <- binary.data@g1O1[cov.vals == cov.val]\n g1O2.tmp <- binary.data@g1O2[cov.vals == cov.val]\n g2O1.tmp <- binary.data@g2O1[cov.vals == cov.val]\n g2O2.tmp <- binary.data@g2O2[cov.vals == cov.val]\n subgroup.data <- new('BinaryData', g1O1=g1O1.tmp, \n g1O2=g1O2.tmp, g2O1=g2O1.tmp, \n g2O2=g2O2.tmp, y=y.tmp, SE=SE.tmp, study.names=names.tmp)\n } else {\n subgroup.data <- new('BinaryData', y=y.tmp, SE=SE.tmp, study.names=names.tmp)\n }\n subgroup.data\n}\n\n#############################\n# continuous subgroup MA #\n#############################\n\nsubgroup.ma.continuous <- function(fname, cont.data, params){\n if (!(\"ContinuousData\" %in% class(cont.data))) stop(\"Continuous data expected.\")\n params.tmp <- params\n cov.name <- as.character(params$cov_name)\n selected.cov <- get.cov(cont.data, cov.name)\n cov.vals <- [email protected]\n #params$create.plot <- FALSE\n #params.tmp$write.to.file <- FALSE\n\tparams.tmp$supress.output <- TRUE\n subgroup.list <- unique(cov.vals)\n grouped.data <- array(list(NULL),c(length(subgroup.list)+1))\n subgroup.results <- array(list(NULL), c(length(subgroup.list)+1))\n col3.nums <- NULL\n col3.denoms <- NULL\n col4.nums <- NULL\n col4.denoms <- NULL\n count <- 1\n for (i in subgroup.list){\n # build a ContinuousData object \n cont.data.tmp 
<- get.subgroup.data.cont(cont.data, i, cov.vals) \n grouped.data[[count]] <- cont.data.tmp\n cur.res <- eval(call(fname, cont.data.tmp, params))\n cur.overall <- eval(call(paste(fname, \".overall\", sep=\"\"), cur.res))\n subgroup.results[[count]] <- cur.overall\n count <- count + 1\n }\n res <- eval(call(fname, cont.data, params))\n res.overall <- eval(call(paste(fname, \".overall\", sep=\"\"), res))\n grouped.data[[count]] <- cont.data\n subgroup.results[[count]] <- res.overall\n subgroup.names <- paste(\"Subgroup \", subgroup.list, sep=\"\")\n subgroup.names <- c(subgroup.names, \"Overall\")\n metric.name <- pretty.metric.name(as.character(params$measure))\n model.title <- switch(fname,\n\t\t\t\t\t\t continuous.fixed = paste(\"Continuous Fixed-effect Model - Inverse Variance\\n\\nMetric: \", metric.name, sep=\"\"),\n\t\t\t\t\t\t continuous.random = paste(\"Continuous Random-Effects Model\\n\\nMetric: \", metric.name, sep=\"\"))\n value.info <- switch(fname,\n\t\t\t\t\t\t continuous.fixed = continuous.fixed.value.info(),\n\t\t\t\t\t\t continuous.random = continuous.random.value.info())\t\t \n subgroup.disp <- create.overall.display(subgroup.results, subgroup.names, params, model.title, data.type=\"continuous\")\n forest.path <- paste(params$fp_outpath, sep=\"\")\n # pack up the data for forest plot.\n subgroup.data <- list(\"subgroup.list\"=subgroup.list, \"grouped.data\"=grouped.data, \"results\"=subgroup.results, \n \"col3.nums\"=col3.nums, \"col3.denoms\"=col3.denoms, \"col4.nums\"=col4.nums, \"col4.denoms\"=col4.denoms)\n plot.data <- create.subgroup.plot.data.cont(subgroup.data, params)\n changed.params <- plot.data$changed.params\n # list of changed params values\n params.changed.in.forest.plot <- forest.plot(forest.data=plot.data, outpath=forest.path)\n changed.params <- c(changed.params, params.changed.in.forest.plot)\n params[names(changed.params)] <- changed.params\n # update params values\n # we use the system time as our unique-enough string to store\n # the params object\n forest.plot.params.path <- save.data(cont.data, res, params, plot.data)\n # Now we package the results in a dictionary (technically, a named \n # vector). In particular, there are two fields that must be returned; \n # a dictionary of images (mapping titles to image paths) and a list of texts\n # (mapping titles to pretty-printed text). In this case we have only one \n # of each. 
\n # \n plot.params.paths <- c(\"Subgroups Forest Plot\"=forest.plot.params.path)\n images <- c(\"Subgroups Forest Plot\"=forest.path)\n plot.names <- c(\"subgroups forest plot\"=\"subgroups_forest_plot\")\n\t\n\treferences <- c(res$References, subgroup_ma_ref)\n\t\n results <- list(\"images\"=images,\n\t\t\t \"Subgroup Summary\"=subgroup.disp, \n \"plot_names\"=plot.names, \n \"plot_params_paths\"=plot.params.paths,\n\t\t\t\t\t\"References\"=references,\n\t\t\t\t\t\"res\" = construct.subgroup.res.output(subgroup.results),\n\t\t\t\t\t\"res.info\" = construct.subgroup.value.info(value.info, subgroup.list))\n results\n}\n\nget.subgroup.data.cont <- function(cont.data, cov.val, cov.vals) {\n # returns the subgroup data corresponding to a categorical covariant cov.name\n # and value cov.val\n if (!(\"ContinuousData\" %in% class(cont.data))) stop(\"Continuous data expected.\")\n y.tmp <- cont.data@y[cov.vals == cov.val]\n SE.tmp <- cont.data@SE[cov.vals == cov.val]\n names.tmp <- [email protected][cov.vals == cov.val]\n if (length(cont.data@N1) > 0){\n N1.tmp <- cont.data@N1[cov.vals == cov.val]\n mean1.tmp <- cont.data@mean1[cov.vals == cov.val]\n sd1.tmp <- cont.data@sd1[cov.vals == cov.val]\n N2.tmp <- cont.data@N2[cov.vals == cov.val]\n mean2.tmp <- cont.data@mean2[cov.vals == cov.val]\n sd2.tmp <- cont.data@sd2[cov.vals == cov.val]\n subgroup.data <- new('ContinuousData', \n N1=N1.tmp, mean1=mean1.tmp , sd1=sd1.tmp, \n N2=N2.tmp, mean2=mean2.tmp, sd2=sd2.tmp,\n y=y.tmp, SE=SE.tmp, \n study.names=names.tmp)\n } else {\n subgroup.data <- new('ContinuousData', \n y=y.tmp, SE=SE.tmp, \n study.names=names.tmp)\n }\n subgroup.data\n}\n\nget.cov <- function(om.data, cov.name) {\n # extracts the covariate with specified name from om.data\n covariate <- NULL\n count <- 1\n while ((count <= length(om.data@covariates)) & (is.null(covariate))) {\n if (om.data@covariates[[count]]@cov.name == cov.name) {\n covariate <- om.data@covariates[[count]]\n }\n count <- count + 1\n }\n covariate\n}\n\nupdate.plot.data.multiple <- function(binary.data, params, results) {\n\n scale.str <- \"standard\"\n if (metric.is.log.scale(as.character(params$measure))){\n scale.str <- \"log\"\n }\n transform.name <- \"binary.transform.f\"\n data.type <- \"binary\"\n plot.options <- extract.plot.options(params)\n if (!is.null(params$fp_display.lb)) {\n plot.options$display.lb <- eval(call(transform.name, params$measure))$calc.scale(params$fp_display.lb)\n }\n if (!is.null(params$fp_display.ub)) {\n plot.options$display.ub <- eval(call(transform.name, params$measure))$calc.scale(params$fp_display.ub)\n }\n if (!is.null(params$fp_show.summary.line)) {\n plot.options$show.summary.line <- params$fp_show_summary_line\n } else {\n plot.options$show.summary.line <- TRUE\n }\n # plot options passed in via params\n plot.data <- list(label = c(paste(params$fp_col1_str, sep = \"\"), [email protected], \"Overall\"),\n types = c(3, rep(0, length([email protected])), 2),\n scale = scale.str,\n data.type = data.type,\n overall =FALSE,\n options = plot.options)\n alpha <- 1.0-(params$conf.level/100.0)\n mult <- abs(qnorm(alpha/2.0))\n y.overall <- res$b[1]\n lb.overall <- res$ci.lb[1]\n ub.overall <- res$ci.ub[1]\n y <- binary.data@y\n lb <- y - mult*binary.data@SE\n ub <- y + mult*binary.data@SE\n\n y <- c(y, y.overall)\n lb <- c(lb, lb.overall)\n ub <- c(ub, ub.overall)\n\n # transform entries to display scale\n y.disp <- eval(call(transform.name, params$measure))$display.scale(y)\n lb.disp <- eval(call(transform.name, 
params$measure))$display.scale(lb)\n ub.disp <- eval(call(transform.name, params$measure))$display.scale(ub)\n\n if (params$fp_show_col2=='TRUE') {\n # format entries for text column in forest plot\n effect.size.col <- format.effect.size.col(y.disp, lb.disp, ub.disp, params)\n plot.data$additional.col.data$es <- effect.size.col\n }\n if (scale.str == \"log\") {\n # if metric is log scale, pass effect sizes in log scale.\n effects <- list(ES = y,\n LL = lb,\n UL = ub)\n } else {\n # otherwise pass effect sizes in standard scale\n effects <- list(ES = y.disp,\n LL = lb.disp,\n UL = ub.disp)\n }\n plot.data$effects <- effects\n # covariates\n if (!is.null(selected.cov)){\n cov.val.str <- paste(\"binary.data@covariates$\", selected.cov, sep=\"\")\n cov.values <- eval(parse(text=cov.val.str))\n plot.data$covariate <- list(varname = selected.cov,\n values = cov.values)\n }\n plot.data$fp_xlabel <- paste(params$fp_xlabel, sep = \"\")\n plot.data$fp_xticks <- params$fp_xticks\n plot.data\n}\n \n###################################################\n# leave-one-out diagnostic methods #\n###################################################\nmultiple.loo.diagnostic <- function(fnames, params.list, diagnostic.data) {\n\n # wrapper for applying leave-one-out method to multiple diagnostic functions and metrics \n\n ####\n # fnames -- names of diagnostic meta-analytic functions to call\n # params.list -- parameter lists to be passed along to the functions in\n # fnames\n # diagnostic.data -- the (diagnostic data) that is to be analyzed \n ###\n metrics <- c()\n results <- list()\n pretty.names <- diagnostic.fixed.inv.var.pretty.names()\n sens.spec.outpath <- c()\n\treferences <- c()\n for (count in 1:length(params.list)) {\n metrics <- c(metrics, params.list[[count]]$measure)\n if (params.list[[count]]$measure==\"Sens\") {\n sens.index <- count\n #sens.spec.outpath <- params.list[[count]]$fp_outpath\n }\n if (params.list[[count]]$measure==\"Spec\") {\n spec.index <- count\n #sens.spec.outpath <- params.list[[count]]$fp_outpath\n }\n if (params.list[[count]]$measure==\"PLR\") {\n plr.index <- count\n #if (params.list[[count]]$fp_outpath==sens.spec.outpath) {\n # for future use - check that path names are distinct. 
\n # params.list[[count]]$fp_outpath <- paste(sub(\".png\",\"\",sens.spec.outpath), \"1.png\", sep=\"\") \n # if fp_outpath is the same as for sens or spec, append a 1.\n #}\n }\n if (params.list[[count]]$measure==\"NLR\") {\n nlr.index <- count\n #if (params.list[[count]]$fp_outpath==sens.spec.outpath) {\n # params.list[[count]]$fp_outpath <- paste(sub(\".png\",\"\",sens.spec.outpath), \"1.png\", sep=\"\") \n # # if fp_outpath is the same as for sens or spec, append a 1.\n #}\n }\n }\n \n images <- c()\n plot.names <- c()\n plot.params.paths <- c()\n remove.indices <- c()\n\n if ((\"Sens\" %in% metrics) & (\"Spec\" %in% metrics)) {\n # create side-by-side forest plots for sens and spec.\n params.sens <- params.list[[sens.index]]\n params.spec <- params.list[[spec.index]]\n params.sens$create.plot <- FALSE\n params.sens$write.to.file <- FALSE\n params.spec$create.plot <- FALSE\n params.spec$write.to.file <- FALSE\n params.tmp <- list(\"left\"=params.sens, \"right\"=params.spec)\n \n fname <- fnames[sens.index]\n diagnostic.data.sens <- compute.diag.point.estimates(diagnostic.data, params.sens)\n diagnostic.data.spec <- compute.diag.point.estimates(diagnostic.data, params.spec)\n \n results.sens <- loo.ma.diagnostic(fname, diagnostic.data.sens, params.sens)\n results.spec <- loo.ma.diagnostic(fname, diagnostic.data.spec, params.spec)\n\n diagnostic.data.sens.spec <- list(\"left\"=diagnostic.data.sens, \"right\"=diagnostic.data.spec)\n \n summary.sens <- list(\"Summary\"=results.sens$Summary)\n names(summary.sens) <- paste(eval(parse(text=paste(\"pretty.names$measure$\", params.sens$measure,sep=\"\"))), \" Summary\", sep=\"\")\n summary.spec <- list(\"Summary\"=results.spec$Summary)\n names(summary.spec) <- paste(eval(parse(text=paste(\"pretty.names$measure$\", params.spec$measure,sep=\"\"))), \" Summary\", sep=\"\")\n results <- c(results, summary.sens, summary.spec)\n\t\t\n\t\treferences <- c(references, results.sens$Reference) # spec reference will be the same\n \n res.sens.spec <- list(\"left\"=results.sens$res, \"right\"=results.spec$res)\n plot.data <- create.loo.side.by.side.plot.data(diagnostic.data.sens.spec, params.tmp, res=res.sens.spec)\n \n forest.path <- paste(params.sens$fp_outpath, sep=\"\")\n two.forest.plots(plot.data, outpath=forest.path)\n \n forest.plot.params.path <- save.data(om.data=diagnostic.data.sens.spec, res.sens.spec, params=params.tmp, plot.data)\n plot.params.paths.tmp <- c(\"Sensitivity and Specificity Forest Plot\"=forest.plot.params.path)\n plot.params.paths <- c(plot.params.paths, plot.params.paths.tmp)\n \n images.tmp <- c(\"Sensitivity and Specificity Forest Plot\"=forest.path)\n images <- c(images, images.tmp)\n \n plot.names.tmp <- c(\"forest plot\"=\"forest.plot\")\n plot.names <- c(plot.names, plot.names.tmp)\n \n remove.indices <- c(sens.index, spec.index)\n }\n \n if ((\"NLR\" %in% metrics) & (\"PLR\" %in% metrics)) {\n # create side-by-side forest plots for NLR and PLR.\n params.nlr <- params.list[[nlr.index]]\n params.plr <- params.list[[plr.index]]\n params.nlr$create.plot <- FALSE\n params.nlr$write.to.file <- FALSE\n params.plr$create.plot <- FALSE\n params.plr$write.to.file <- FALSE\n params.tmp <- list(\"left\"=params.nlr, \"right\"=params.plr)\n \n fname <- fnames[nlr.index]\n diagnostic.data.nlr <- compute.diag.point.estimates(diagnostic.data, params.nlr)\n diagnostic.data.plr <- compute.diag.point.estimates(diagnostic.data, params.plr)\n results.nlr <- loo.ma.diagnostic(fname, diagnostic.data.nlr, params.nlr)\n results.plr <- 
loo.ma.diagnostic(fname, diagnostic.data.plr, params.plr)\n diagnostic.data.nlr.plr <- list(\"left\"=diagnostic.data.nlr, \"right\"=diagnostic.data.plr)\n \n\t\treferences <- c(references, results.nlr$References)\n\t\t\n\t\tsummary.nlr <- list(\"Summary\"=results.nlr$Summary)\n names(summary.nlr) <- paste(eval(parse(text=paste(\"pretty.names$measure$\", params.nlr$measure,sep=\"\"))), \" Summary\", sep=\"\")\n summary.plr <- list(\"Summary\"=results.plr$Summary)\n names(summary.plr) <- paste(eval(parse(text=paste(\"pretty.names$measure$\", params.plr$measure,sep=\"\"))), \" Summary\", sep=\"\")\n results <- c(results, summary.nlr, summary.plr)\n \n res.nlr.plr <- list(\"left\"=results.nlr$res, \"right\"=results.plr$res)\n plot.data <- create.loo.side.by.side.plot.data(diagnostic.data.nlr.plr, params.tmp, res=res.nlr.plr)\n \n forest.path <- paste(params.nlr$fp_outpath, sep=\"\")\n two.forest.plots(plot.data, outpath=forest.path)\n \n forest.plot.params.path <- save.data(diagnostic.data.nlr.plr, res.nlr.plr, params=params.tmp, plot.data)\n # @TODO: If you want to edit the plot, need to also \n plot.params.paths.tmp <- c(\"NLR and PLR Forest Plot\"=forest.plot.params.path)\n plot.params.paths <- c(plot.params.paths, plot.params.paths.tmp)\n \n images.tmp <- c(\"NLR and PLR Forest Plot\"=forest.path)\n images <- c(images, images.tmp)\n \n plot.names.tmp <- c(\"forest plot\"=\"forest.plot\")\n plot.names <- c(plot.names, plot.names.tmp)\n \n remove.indices <- c(remove.indices, nlr.index, plr.index)\n }\n\n # remove fnames and params for side-by-side plots\n fnames <- fnames[setdiff(1:length(fnames), remove.indices)]\n params.list <- params.list[setdiff(1:length(params.list), remove.indices)]\n\n if (length(params.list) > 0) {\n for (count in 1:length(params.list)) {\n # create ma summaries and single (not side-by-side) forest plots.\n #pretty.names <- eval(call(paste(fnames[count],\".pretty.names\",sep=\"\")))\n diagnostic.data.tmp <- compute.diag.point.estimates(diagnostic.data, params.list[[count]])\n results.tmp <- loo.ma.diagnostic(fnames[[count]], diagnostic.data.tmp, params.list[[count]])\n #if (is.null(params.list[[count]]$create.plot)) {\n # create plot\n images.tmp <- results.tmp$images\n names(images.tmp) <- paste(eval(parse(text=paste(\"pretty.names$measure$\",params.list[[count]]$measure,sep=\"\"))), \" Forest Plot\", sep=\"\")\n images <- c(images, images.tmp)\n plot.params.paths.tmp <- results.tmp$plot_params_paths\n names(plot.params.paths.tmp) <- paste(eval(parse(text=paste(\"pretty.names$measure$\", params.list[[count]]$measure,sep=\"\"))), \" Forest Plot\", sep=\"\")\n plot.params.paths <- c(plot.params.paths, plot.params.paths.tmp)\n plot.names <- c(plot.names, results.tmp$plot.names)\n #}\n summary.tmp <- list(\"Summary\"=results.tmp$Summary)\n names(summary.tmp) <- paste(eval(parse(text=paste(\"pretty.names$measure$\",params.list[[count]]$measure,sep=\"\"))), \" Summary\", sep=\"\")\n \n\t\t\treferences <- c(references, results.tmp$References)\n\t\t\t\n\t\t\tresults <- c(results, summary.tmp)\n }\n }\n results <- c(results, list(\"images\"=images,\n\t\t\t\t\t \"plot_names\"=plot.names, \n \"plot_params_paths\"=plot.params.paths,\n\t\t\t\t\t\t\t \"References\"=references))\n #results$images <- images\n #results$plot.names <- plot.names\n #results$plot.params.paths <- plot.params.paths\n results\n}\n\nloo.ma.diagnostic <- function(fname, diagnostic.data, params){\n # performs a single leave-one-out meta-analysis for diagnostic.data\n # assert that the argument is the correct 
type\n if (!(\"DiagnosticData\" %in% class(diagnostic.data))) stop(\"Diagnostic data expected.\")\n loo.results <- array(list(NULL), dim=c(length([email protected])))\n params.tmp <- params\n params.tmp$create.plot <- FALSE\n params.tmp$write.to.file <- FALSE\n res <- eval(call(fname, diagnostic.data, params.tmp))\n res.overall <- eval(call(paste(fname, \".overall\", sep=\"\"), res))\n N <- length([email protected])\n for (i in 1:N){\n # get a list of indices, i.e., the subset\n # that is 1:N with i left out\n index.ls <- setdiff(1:N, i)\n \n # build a DiagnosticData object with the \n # ith study removed. \n y.tmp <- diagnostic.data@y[index.ls]\n SE.tmp <- diagnostic.data@SE[index.ls]\n names.tmp <- [email protected][index.ls]\n diag.data.tmp <- NULL\n \n if (length(diagnostic.data@TP) > 0){\n # if we have group level data for \n # group 1, outcome 1, then we assume\n # we have it for all groups\n TP.tmp <- diagnostic.data@TP[index.ls]\n FN.tmp <- diagnostic.data@FN[index.ls]\n TN.tmp <- diagnostic.data@TN[index.ls]\n FP.tmp <- diagnostic.data@FP[index.ls]\n diag.data.tmp <- new('DiagnosticData', TP=TP.tmp, \n FN=FN.tmp , TN=TN.tmp, \n FP=FP.tmp, y=y.tmp, SE=SE.tmp, study.names=names.tmp)\n } else{\n diag.data.tmp <- new('DiagnosticData', y=y.tmp, SE=SE.tmp, study.names=names.tmp)\n }\n # call the parametric function by name, passing along the \n # data and parameters. Notice that this method knows\n # neither what method its calling nor what parameters\n # it's passing!\n cur.res <- eval(call(fname, diag.data.tmp, params.tmp))\n cur.overall <- eval(call(paste(fname, \".overall\", sep=\"\"), cur.res))\n loo.results[[i]] <- cur.overall\n }\n loo.results <- c(list(res.overall), loo.results)\n # Add overall results\n study.names <- c(\"Overall\", paste(\"- \", [email protected], sep=\"\"))\n metric.name <- pretty.metric.name(as.character(params$measure))\n\tmodel.title <- switch(fname,\n\t\t\tdiagnostic.fixed = paste(\"Diagnostic Fixed-effect Model - Inverse Variance\\n\\nMetric: \", metric.name, sep=\"\"),\n\t\t\tdiagnostic.random = paste(\"Diagnostic Random-Effects Model\\n\\nMetric: \", metric.name, sep=\"\"))\n loo.disp <- create.overall.display(res=loo.results, study.names, params, model.title, data.type=\"diagnostic\")\n \n if (is.null(params$create.plot)) {\n plot.data <- create.plot.data.loo(diagnostic.data, params, res=loo.results)\n forest.path <- paste(params$fp_outpath, sep=\"\")\n changed.params <- plot.data$changed.params\n # list of changed params values\n params.changed.in.forest.plot <- forest.plot(forest.data=plot.data, outpath=forest.path)\n changed.params <- c(changed.params, params.changed.in.forest.plot)\n params[names(changed.params)] <- changed.params\n # update params values\n # we use the system time as our unique-enough string to store\n # the params object\n forest.plot.params.path <- save.data(diagnostic.data, res=loo.results, params, plot.data)\n #\n # Now we package the results in a dictionary (technically, a named \n # vector). In particular, there are two fields that must be returned; \n # a dictionary of images (mapping titles to image paths) and a list of texts\n # (mapping titles to pretty-printed text). In this case we have only one \n # of each. 
\n # \n plot.params.paths <- c(\"Forest Plot\"=forest.plot.params.path)\n images <- c(\"Leave-one-out Forest plot\"=forest.path)\n plot.names <- c(\"loo forest plot\"=\"loo_forest_plot\")\n results <- list(\"images\"=images, \"Summary\"=loo.disp, \n \"plot_names\"=plot.names, \n \"plot_params_paths\"=plot.params.paths)\n } else {\n results <- list(res=loo.results, res.overall=res.overall, Summary=loo.disp) \n } \n\t\n\treferences <- c(res$References, loo_ma_ref)\n\tresults[[\"References\"]] <- references\n results\n}\n\ncreate.loo.side.by.side.plot.data <- function(diagnostic.data, params, res) { \n # creates data for two side-by-side leave-one-out forest plots\n params.left <- params$left\n params.right <- params$right\n params.left$fp_show_col1 <- 'TRUE'\n params.right$fp_show_col1 <- 'FALSE'\n # only show study names on the left plot\n res.left <- res$left\n res.right <- res$right \n diagnostic.data.left <- diagnostic.data$left\n diagnostic.data.right <- diagnostic.data$right\n study.names <- c(\"Overall\", paste(\"- \", [email protected], sep=\"\"))\n plot.data.left <- create.plot.data.loo(diagnostic.data.left, params.left, res.left)\n plot.data.left$options$fp.title <- pretty.metric.name(as.character(params.left$measure))\n plot.data.right <- create.plot.data.loo(diagnostic.data.right, params.right, res.right)\n plot.data.right$options$fp.title <- pretty.metric.name(as.character(params.right$measure))\n plot.data <- list(\"left\"=plot.data.left, \"right\"=plot.data.right)\n plot.data\n}\n\n#################################\n# subgroup diagnostic methods #\n#################################\nmultiple.subgroup.diagnostic <- function(fnames, params.list, diagnostic.data) {\n\n # wrapper for applying subgroup method to multiple diagnostic functions and metrics \n\n ####\n # fnames -- list of names of diagnostic meta-analytic functions to call\n # params.list -- list parameter lists to be passed along to the functions in\n # fnames\n # diagnostic.data -- the (diagnostic data) that is to be analyzed \n ###\n metrics <- c()\n results <- list()\n pretty.names <- diagnostic.fixed.inv.var.pretty.names()\n sens.spec.outpath <- c()\n for (count in 1:length(params.list)) {\n metrics <- c(metrics, params.list[[count]]$measure)\n if (params.list[[count]]$measure==\"Sens\") {\n sens.index <- count\n #sens.spec.outpath <- params.list[[count]]$fp_outpath\n }\n if (params.list[[count]]$measure==\"Spec\") {\n spec.index <- count\n #sens.spec.outpath <- params.list[[count]]$fp_outpath\n }\n if (params.list[[count]]$measure==\"PLR\") {\n plr.index <- count\n #if (params.list[[count]]$fp_outpath==sens.spec.outpath) {\n # for future use - check that path names are distinct. 
\n # params.list[[count]]$fp_outpath <- paste(sub(\".png\",\"\",sens.spec.outpath), \"1.png\", sep=\"\") \n # if fp_outpath is the same as for sens or spec, append a 1.\n #}\n }\n if (params.list[[count]]$measure==\"NLR\") {\n nlr.index <- count\n #if (params.list[[count]]$fp_outpath==sens.spec.outpath) {\n # params.list[[count]]$fp_outpath <- paste(sub(\".png\",\"\",sens.spec.outpath), \"1.png\", sep=\"\") \n # # if fp_outpath is the same as for sens or spec, append a 1.\n #}\n }\n }\n cov.name <- as.character(params.list[[1]]$cov_name)\n selected.cov <- get.cov(diagnostic.data, cov.name)\n images <- c()\n plot.names <- c()\n plot.params.paths <- c()\n remove.indices <- c()\n\treferences <- c()\n\n if ((\"Sens\" %in% metrics) & (\"Spec\" %in% metrics)) {\n # create side-by-side subgroup forest plots for sens and spec.\n \n params.sens <- params.list[[sens.index]]\n params.spec <- params.list[[spec.index]]\n params.sens$create.plot <- FALSE\n params.spec$create.plot <- FALSE\n params.tmp <- list(\"left\"=params.sens, \"right\"=params.spec)\n \n fname <- fnames[sens.index]\n diagnostic.data.sens <- compute.diag.point.estimates(diagnostic.data, params.sens)\n diagnostic.data.spec <- compute.diag.point.estimates(diagnostic.data, params.spec)\n \n results.sens <- subgroup.ma.diagnostic(fname, diagnostic.data.sens, params.sens, selected.cov)\n results.spec <- subgroup.ma.diagnostic(fname, diagnostic.data.spec, params.spec, selected.cov)\n\t\tdiagnostic.data.sens.spec <- list(\"left\"=diagnostic.data.sens, \"right\"=diagnostic.data.spec) ##\n\t\t\n subgroup.data.sens <- results.sens$subgroup.data\n subgroup.data.spec <- results.spec$subgroup.data\n subgroup.data.all <- list(\"left\"=subgroup.data.sens, \"right\"=subgroup.data.spec)\n\t\t\n\t\treferences <- c(references, results.sens$Reference) # spec reference will be the same\n \n summary.sens <- list(\"Summary\"=results.sens$Summary)\n names(summary.sens) <- paste(eval(parse(text=paste(\"pretty.names$measure$\", params.sens$measure,sep=\"\"))), \" Summary\", sep=\"\")\n summary.spec <- list(\"Summary\"=results.spec$Summary)\n names(summary.spec) <- paste(eval(parse(text=paste(\"pretty.names$measure$\", params.spec$measure,sep=\"\"))), \" Summary\", sep=\"\")\n results <- c(results, summary.sens, summary.spec)\n\t\t\n\t\tres.sens.spec <- list(\"left\"=results.sens$res, \"right\"=results.spec$res) ##\n\t\t\n #res.sens <- results.sens$res\n #res.spec <- results.spec$res\n #res <- list(\"left\"=res.sens, \"right\"=res.spec)\n \n plot.data <- create.subgroup.side.by.side.plot.data(subgroup.data.all, params=params.tmp)\n \n forest.path <- paste(params.sens$fp_outpath, sep=\"\")\n two.forest.plots(plot.data, outpath=forest.path)\n \n ######forest.plot.params.path <- save.data(subgroup.data.all, params=params.tmp)\n\t\tforest.plot.params.path <- save.data(om.data=diagnostic.data.sens.spec, res.sens.spec, params=params.tmp, plot.data)\n plot.params.paths.tmp <- c(\"Sensitivity and Specificity Forest Plot\"=forest.plot.params.path)\n plot.params.paths <- c(plot.params.paths, plot.params.paths.tmp)\n \n images.tmp <- c(\"Sensitivity and Specificity Forest Plot\"=forest.path)\n images <- c(images, images.tmp)\n \n plot.names.tmp <- c(\"forest plot\"=\"forest.plot\")\n plot.names <- c(plot.names, plot.names.tmp)\n \n remove.indices <- c(sens.index, spec.index)\n }\n \n if ((\"NLR\" %in% metrics) & (\"PLR\" %in% metrics)) {\n # create side-by-side forest plots for NLR and PLR.\n params.nlr <- params.list[[nlr.index]]\n params.plr <- 
params.list[[plr.index]]\n params.nlr$create.plot <- FALSE\n params.plr$create.plot <- FALSE\n params.tmp <- list(\"left\"=params.nlr, \"right\"=params.plr)\n \n fname <- fnames[nlr.index]\n diagnostic.data.nlr <- compute.diag.point.estimates(diagnostic.data, params.nlr)\n diagnostic.data.plr <- compute.diag.point.estimates(diagnostic.data, params.plr)\n \n results.nlr <- subgroup.ma.diagnostic(fname, diagnostic.data.nlr, params.nlr, selected.cov)\n results.plr <- subgroup.ma.diagnostic(fname, diagnostic.data.plr, params.plr, selected.cov)\n\t\tdiagnostic.data.nlr.plr <- list(\"left\"=diagnostic.data.nlr, \"right\"=diagnostic.data.plr) ###\n\t\t\n subgroup.data.nlr <- results.nlr$subgroup.data\n subgroup.data.plr <- results.plr$subgroup.data\n subgroup.data.all <- list(\"left\"=subgroup.data.nlr, \"right\"=subgroup.data.plr)\n\t\t\n\t\treferences <- c(references, results.nlr$References)\n \n summary.nlr <- list(\"Summary\"=results.nlr$Summary)\n names(summary.nlr) <- paste(eval(parse(text=paste(\"pretty.names$measure$\", params.nlr$measure,sep=\"\"))), \" Summary\", sep=\"\")\n summary.plr <- list(\"Summary\"=results.plr$Summary)\n names(summary.plr) <- paste(eval(parse(text=paste(\"pretty.names$measure$\", params.plr$measure,sep=\"\"))), \" Summary\", sep=\"\")\n results <- c(results, summary.nlr, summary.plr)\n\t\t\n\t\tres.nlr.plr <- list(\"left\"=results.nlr$res, \"right\"=results.plr$res) ##\n \n #res.nlr <- results.nlr$res\n #res.plr <- results.plr$res\n #res <- list(\"left\"=res.nlr, \"right\"=res.plr)\n \n plot.data <- create.subgroup.side.by.side.plot.data(subgroup.data.all, params.tmp)\n \n forest.path <- paste(params.nlr$fp_outpath, sep=\"\")\n two.forest.plots(plot.data, outpath=forest.path)\n \n\t\tforest.plot.params.path <- save.data(diagnostic.data.nlr.plr, res.nlr.plr, params=params.tmp, plot.data)\n ######forest.plot.params.path <- save.data(subgroup.data.all, params=params.tmp)\n plot.params.paths.tmp <- c(\"NLR and PLR Forest Plot\"=forest.plot.params.path)\n plot.params.paths <- c(plot.params.paths, plot.params.paths.tmp)\n \n images.tmp <- c(\"NLR and PLR Forest Plot\"=forest.path)\n images <- c(images, images.tmp)\n \n plot.names.tmp <- c(\"forest plot\"=\"forest.plot\")\n plot.names <- c(plot.names, plot.names.tmp)\n \n remove.indices <- c(remove.indices, nlr.index, plr.index)\n }\n\n # remove fnames and params for side-by-side plots\n fnames <- fnames[setdiff(1:length(fnames), remove.indices)]\n params.list <- params.list[setdiff(1:length(params.list), remove.indices)]\n\n if (length(params.list) > 0) {\n for (count in 1:length(params.list)) {\n # create ma summaries and single (not side-by-side) forest plots.\n #pretty.names <- eval(call(paste(fnames[count],\".pretty.names\",sep=\"\")))\n diagnostic.data.tmp <- compute.diag.point.estimates(diagnostic.data, params.list[[count]])\n results.tmp <- subgroup.ma.diagnostic(fnames[[count]], diagnostic.data.tmp, params.list[[count]], selected.cov)\n if (is.null(params.list[[count]]$create.plot)) {\n # create plot\n images.tmp <- results.tmp$images\n names(images.tmp) <- paste(eval(parse(text=paste(\"pretty.names$measure$\",params.list[[count]]$measure,sep=\"\"))), \" Forest Plot\", sep=\"\")\n images <- c(images, images.tmp)\n plot.params.paths.tmp <- results.tmp$plot_params_paths\n names(plot.params.paths.tmp) <- paste(eval(parse(text=paste(\"pretty.names$measure$\", params.list[[count]]$measure,sep=\"\"))), \" Forest Plot\", sep=\"\")\n plot.params.paths <- c(plot.params.paths, plot.params.paths.tmp)\n plot.names <- 
c(plot.names, results.tmp$plot_names)\n }\n summary.tmp <- list(\"Summary\"=results.tmp$Summary)\n names(summary.tmp) <- paste(eval(parse(text=paste(\"pretty.names$measure$\",params.list[[count]]$measure,sep=\"\"))), \" Summary\", sep=\"\")\n \n\t\t\treferences <- c(references, results.tmp$References)\n\t\t\t\n\t\t\tresults <- c(results, summary.tmp)\n }\n }\n results <- c(results, list(\"images\"=images,\n\t\t\t\t\t \"plot_names\"=plot.names, \n \"plot_params_paths\"=plot.params.paths,\n\t\t\t\t\t\t\t \"References\"=references))\n results\n}\n\nsubgroup.ma.diagnostic <- function(fname, diagnostic.data, params, selected.cov){\n # performs a single subgroup meta-analysis for diagnostic.data\n if (!(\"DiagnosticData\" %in% class(diagnostic.data))) stop(\"Diagnostic data expected.\")\n #cov.name <- as.character(params$cov_name)\n #selected.cov <- get.cov(diagnostic.data, cov.name)\n cov.vals <- [email protected]\n params.tmp <- params\n params.tmp$create.plot <- FALSE\n params.tmp$write.to.file <- FALSE\n subgroup.list <- unique(cov.vals)\n grouped.data <- array(list(NULL),c(length(subgroup.list) + 1))\n subgroup.results <- array(list(NULL), c(length(subgroup.list) + 1))\n col3.nums <- NULL\n col3.denoms <- NULL\n col4.nums <- NULL\n col4.denoms <- NULL\n count <- 1\n for (i in subgroup.list){\n # build a DiagnosticData object \n diag.data.tmp <- get.subgroup.data.diagnostic(diagnostic.data, i, cov.vals)\n grouped.data[[count]] <- diag.data.tmp\n # collect raw data columns\n raw.data <- list(\"TP\"=diag.data.tmp@TP, \"FN\"=diag.data.tmp@FN, \"TN\"=diag.data.tmp@TN, \"FP\"=diag.data.tmp@FP)\n terms <- compute.diagnostic.terms(raw.data, params.tmp)\n col3.nums <- c(col3.nums, terms$numerator, sum(terms$numerator))\n col3.denoms <- c(col3.denoms, terms$denominator, sum(terms$denominator))\n cur.res <- eval(call(fname, diag.data.tmp, params.tmp))\n cur.overall <- eval(call(paste(fname, \".overall\", sep=\"\"), cur.res))\n subgroup.results[[count]] <- cur.overall\n count <- count + 1\n }\n res <- eval(call(fname, diagnostic.data, params.tmp))\n res.overall <- eval(call(paste(fname, \".overall\", sep=\"\"), res))\n grouped.data[[count]] <- diagnostic.data\n subgroup.results[[count]] <- res.overall\n subgroup.names <- paste(\"Subgroup \", subgroup.list, sep=\"\")\n subgroup.names <- c(subgroup.names, \"Overall\")\n \n metric.name <- pretty.metric.name(params.tmp$measure)\n model.title <- switch(fname,\n diagnostic.fixed = paste(\"Diagnostic Fixed-effect Model - Inverse Variance\\n\\nMetric: \", metric.name, sep=\"\"),\n diagnostic.random = paste(\"Diagnostic Random-Effects Model\\n\\nMetric: \", metric.name, sep=\"\"))\n subgroup.disp <- create.subgroup.display(subgroup.results, subgroup.names, params, model.title, data.type=\"diagnostic\")\n forest.path <- paste(params$fp_outpath, sep=\"\")\n # pack up the data for forest plot.\n subgroup.data <- list(\"subgroup.list\"=subgroup.list, \"grouped.data\"=grouped.data, \"results\"=subgroup.results, \n \"col3.nums\"=col3.nums, \"col3.denoms\"=col3.denoms, \"col4.nums\"=col4.nums, \"col4.denoms\"=col4.denoms)\n if (is.null(params$create.plot)) {\n plot.data <- create.subgroup.plot.data.diagnostic(subgroup.data, params)\n changed.params <- plot.data$changed.params\n # list of changed params values\n params.changed.in.forest.plot <- forest.plot(forest.data=plot.data, outpath=forest.path)\n changed.params <- c(changed.params, params.changed.in.forest.plot)\n params[names(changed.params)] <- changed.params\n # update params values\n # we use the system time 
as our unique-enough string to store\n # the params object\n forest.plot.params.path <- save.data(diagnostic.data, res, params, plot.data)\n # Now we package the results in a dictionary (technically, a named \n # vector). In particular, there are two fields that must be returned; \n # a dictionary of images (mapping titles to image paths) and a list of texts\n # (mapping titles to pretty-printed text). In this case we have only one \n # of each. \n # \n plot.params.paths <- c(\"Forest Plot\"=forest.plot.params.path)\n images <- c(\"Subgroups Forest Plot\"=forest.path)\n plot.names <- c(\"subgroups forest plot\"=\"subgroups_forest_plot\")\n results <- list(\"images\"=images, \"Summary\"=subgroup.disp, \n \"plot_names\"=plot.names, \n \"plot_params_paths\"=plot.params.paths)\n } else {\n results <- list(subgroup.data=subgroup.data, Summary=subgroup.disp, \"cov.list\"=subgroup.list)\n }\n\t\n\treferences <- c(res$References, subgroup_ma_ref)\n\tresults[[\"References\"]] <- references\n\t\n results\n}\n\nget.subgroup.data.diagnostic <- function(diagnostic.data, cov.val, cov.vals) {\n # returns the subgroup data corresponding to a categorical covariant cov.name\n # and value cov.val\n if (!(\"DiagnosticData\" %in% class(diagnostic.data))) stop(\"Diagnostic data expected.\")\n y.tmp <- diagnostic.data@y[cov.vals == cov.val]\n SE.tmp <- diagnostic.data@SE[cov.vals == cov.val]\n names.tmp <- [email protected][cov.vals == cov.val]\n if (length(diagnostic.data@TP) > 0){\n TP.tmp <- diagnostic.data@TP[cov.vals==cov.val]\n FN.tmp <- diagnostic.data@FN[cov.vals==cov.val]\n TN.tmp <- diagnostic.data@TN[cov.vals==cov.val]\n FP.tmp <- diagnostic.data@FP[cov.vals==cov.val]\n subgroup.data <- new('DiagnosticData', TP=TP.tmp, \n FN=FN.tmp , TN=TN.tmp, \n FP=FP.tmp, y=y.tmp, SE=SE.tmp, study.names=names.tmp)\n } else {\n subgroup.data <- new('DiagnosticData', y=y.tmp, SE=SE.tmp, study.names=names.tmp)\n }\n subgroup.data\n}\n\ncreate.subgroup.side.by.side.plot.data <- function(subgroup.data, params) { \n # creates data for two side-by-side forest plots\n params.left <- params$left\n params.right <- params$right\n params.left$fp_show_col1 <- 'TRUE'\n params.right$fp_show_col1 <- 'FALSE'\n # only show study names on the left plot\n subgroup.data.left <- subgroup.data$left\n subgroup.data.right <- subgroup.data$right\n \n plot.data.left <- create.subgroup.plot.data.diagnostic(subgroup.data.left, params.left)\n plot.data.left$options$fp.title <- pretty.metric.name(as.character(params.left$measure))\n \n plot.data.right <- create.subgroup.plot.data.diagnostic(subgroup.data.right, params.right)\n plot.data.right$options$fp.title <- pretty.metric.name(as.character(params.right$measure))\n \n plot.data <- list(\"left\"=plot.data.left, \"right\"=plot.data.right)\n plot.data\n}" }, { "alpha_fraction": 0.4591836631298065, "alphanum_fraction": 0.47959184646606445, "avg_line_length": 14.333333015441895, "blob_id": "08a6b0ed8c6c2e820c07ab8b21d0541f3408c960", "content_id": "8dcd510e5209d862520fab4347f99876d59e4982", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 98, "license_type": "no_license", "max_line_length": 33, "num_lines": 6, "path": "/src/R/HSROC/R/B.eij.fonction.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "B.eij.fonction <-\r\nfunction (a, b) \r\n{\r\n result = sum(a[, 1] * a[, 2])\r\n return(result)\r\n}\r\n" }, { "alpha_fraction": 0.28421053290367126, "alphanum_fraction": 0.3105263113975525, "avg_line_length": 
12.615385055541992, "blob_id": "d79d1c8342ea56da7183decefefef0cc0a640886", "content_id": "b040b382a77bab608e8d162a20a49dd4c9fc3165", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 190, "license_type": "no_license", "max_line_length": 30, "num_lines": 13, "path": "/src/R/HSROC/R/beta.condition.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "beta.condition <-\r\nfunction (a, b) \r\n{\r\n if (a == 1.5 & b == 1.5) {\r\n a = b = 1\r\n }\r\n else {\r\n a = a\r\n b = b\r\n }\r\n a_b = rbind(a, b)\r\n return(a_b)\r\n}\r\n" }, { "alpha_fraction": 0.5595442056655884, "alphanum_fraction": 0.5604937672615051, "avg_line_length": 44.884361267089844, "blob_id": "9f2b12cd4ac3782fb63d4129f0a486319bfb2041", "content_id": "70360a508bcadba8240fdf479a898a1664f0c6b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48443, "license_type": "no_license", "max_line_length": 154, "num_lines": 1055, "path": "/src/ma_dataset.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "#############################################################################################\n# # \n# Byron C. Wallace # \n# George Dietz #\n# CEBM @ Brown # \n# OpenMeta[analyst] # \n# # \n# Dataset module; a roll your own back end. This is a model for manipulating # \n# datasets. Note that *no QT lives here*, i.e., it is divorced from the UI entirely. #\n# # \n# The structure is as follows: A Dataset object holds a list of Study objects. # \n# These Study objects in turn contain a dictionary, mapping outcome names # \n# to another dictionary, which maps follow ups (time points) to MA_Unit # \n# objects. Finally, these MA_Unit objects in turn map treatment names # \n# - or groups (e.g., 'control', 'aspirin') - to raw data. Further, at the MA_Unit level, # \n# metrics (e.g., \"OR\") map to dictionaries containing that metric as computed for # \n# the pairwise combinations of the groups/treatments (e.g., OR->\"AvB\"=x) # \n# # \n#############################################################################################\nimport pdb\nfrom PyQt4.QtCore import pyqtRemoveInputHook\nimport copy\n\nimport two_way_dict\nimport meta_globals\n#from meta_globals import *\nimport meta_py_r\n\nBINARY = meta_globals.BINARY\nCONTINUOUS = meta_globals.CONTINUOUS\nDIAGNOSTIC = meta_globals.DIAGNOSTIC\nEMPTY_VALS = meta_globals.EMPTY_VALS\nFACTOR = meta_globals.FACTOR\nTYPE_TO_STR_DICT = meta_globals.TYPE_TO_STR_DICT\n\nclass Dataset:\n def __len__(self):\n return len(self.studies)\n \n def __init__(self, title=None, is_diag=False, summary=None):\n self.title = title\n self.summary = summary\n self.studies = []\n self.is_diag = is_diag\n self.num_outcomes = 0\n self.num_follow_ups = 0\n self.outcome_names_to_follow_ups = {}\n self.num_treatments = 0\n\n self.notes = \"\"\n \n # this will hold a list of covariate objects. 
each study will\n # have a dictionary with values for that study corresponding\n # to each of the covariate objects here.\n self.covariates = []\n\n def copy(self):\n cloned = Dataset(self.title, self.summary)\n cloned.studies = list(self.studies)\n cloned.outcome_names_to_follow_ups = copy.deepcopy(self.outcome_names_to_follow_ups)\n return cloned\n \n def get_outcome_names(self):\n return sorted(self.outcome_names_to_follow_ups.keys())\n\n def change_group_name(self, old_group_name, new_group_name, outcome=None, follow_up=None):\n if (outcome is None and follow_up is not None) or (follow_up is None and outcome is not None):\n raise Exception, \"dataset -- change_group_name -- either both outcome and follow_up should be None, \\\n or else neither should.\"\n \n for study in self.studies:\n if outcome is None and follow_up is None:\n # if no outcome/follow-up was specified, we change *all* occurrences of\n # the old_group_name to the new_group_name\n for outcome_name in study.outcomes_to_follow_ups.keys():\n cur_outcome = study.outcomes_to_follow_ups[outcome_name]\n for ma_unit in cur_outcome.values(): \n ma_unit.rename_group(old_group_name, new_group_name)\n else:\n ma_unit = study.outcomes_to_follow_ups[outcome][follow_up]\n ma_unit.rename_group(old_group_name, new_group_name)\n \n def change_outcome_name(self, old_outcome_name, new_outcome_name):\n self.outcome_names_to_follow_ups[new_outcome_name] = \\\n self.outcome_names_to_follow_ups.pop(old_outcome_name)\n for study in self.studies:\n study.outcomes_to_follow_ups[new_outcome_name] = \\\n study.outcomes_to_follow_ups.pop(old_outcome_name)\n for outcome in study.outcomes:\n if outcome.name == old_outcome_name:\n outcome.name = new_outcome_name\n\n def delete_group(self, group_name):\n study = self.studies[0]\n for study in self.studies:\n for outcome_name in study.outcomes_to_follow_ups.keys():\n cur_outcome = study.outcomes_to_follow_ups[outcome_name]\n for ma_unit in cur_outcome.values(): \n ma_unit.remove_group(group_name) \n\n def add_study(self, study, study_index=None):\n # note that we allow empty outcomes/follow-ups, but handle\n # this at the point of execution\n if study_index is None:\n self.studies.append(study)\n # the else clause was somehow removed (!!!)\n # triggering issue #91\n else:\n self.studies.insert(study_index, study)\n \n def remove_study(self, studyid):\n self.studies = [study for study in self.studies if study.id != studyid]\n \n def num_studies(self):\n return len(self.studies)\n \n def get_outcome_type(self, outcome_name, get_string=False):\n outcome = self.get_outcome_obj(outcome_name)\n if outcome is None: \n return None\n return outcome.data_type if not get_string else TYPE_TO_STR_DICT[outcome.data_type]\n def get_outcome_subtype(self, outcome_name):\n outcome = self.get_outcome_obj(outcome_name)\n if outcome is None or not hasattr(outcome, 'sub_type'):\n return None\n return outcome.sub_type\n \n def get_outcome_obj(self, outcome_name):\n for study in self.studies:\n outcome_obj = study.get_outcome(outcome_name)\n if outcome_obj is not None:\n return outcome_obj\n return None\n \n def max_study_id(self):\n if len(self.studies) == 0:\n return -1\n return max([study.id for study in self.studies])\n\n def remove_covariate(self, covariate):\n cov_index = None # keep record of the remvoed covariate's index.\n # first remove the covariate from the list of \n # covariate objects for this dataset\n for i,cov in enumerate(self.covariates):\n if cov.name == covariate.name:\n self.covariates.remove(cov)\n 
cov_index = i\n break\n # now remove the covariate from all of the studies\n # in the dataset\n for study in self.studies:\n if covariate.name in study.covariate_dict:\n study.covariate_dict.pop(covariate.name)\n return cov_index\n \n def add_covariate(self, covariate, cov_values=None, cov_index=None):\n ''' \n adds the parametric covariate to: 1) the list of covariate objects\n associated with this dataset and 2) the covariate dictionaries of each\n of the studies this dataset contains. Note: the covariate argument\n needs to be a Covariate object (not a string)!\n '''\n if cov_index is None:\n self.covariates.append(covariate)\n else:\n self.covariates.insert(cov_index, covariate)\n\n if cov_values is None:\n for study in self.studies:\n study.covariate_dict[covariate.name] = None\n else:\n # in this case, a dictionary mapping studies to \n # values for this covariate was passed in.\n # this will occur in this case, e.g., that a \n # covariate was removed from the dataset, but then\n # the user clicked 'redo' -- we want to repopulate\n # the dataset with the previous covariate values.\n for study in self.studies:\n if cov_values.has_key(study.name):\n study.covariate_dict[covariate.name] = cov_values[study.name]\n else:\n study.covariate_dict[covariate.name] = None\n \n \n def change_covariate_name(self, old_covariate, new_covariate_name):\n # get the values for this covariate for all studies\n cov_val_dict = copy.deepcopy(self.get_values_for_cov(old_covariate.name))\n cov_index = self.remove_covariate(old_covariate)\n # now add a covariate with the same values, but new name\n # note that we also insert the covariate into the same place that\n # it previously occupied!\n self.add_covariate(Covariate(new_covariate_name, TYPE_TO_STR_DICT[old_covariate.data_type]),\\\n cov_values=cov_val_dict, cov_index=cov_index)\n \n\n \n def get_cov_obj_from_name(self, cov_name):\n for cov in self.covariates:\n if cov.name == cov_name:\n return cov\n \n \n def ids_to_study_names(self):\n ids_to_names = {}\n for study in self.studies:\n ids_to_names[study.id] = study.name\n return ids_to_names\n\n def get_values_for_cov(self, covariate, ids_for_keys=False):\n ''' \n returns a dictionary mapping study names to values for \n the given covariate -- BEWARE these (study names) aren't \n necessarily unique! 
safer to set the ids_for_keys flag.\n '''\n cov_name = covariate\n if isinstance(covariate, Covariate):\n cov_name = covariate.name\n cov_d = {}\n for study in self.studies: \n if study.covariate_dict.has_key(cov_name) and \\\n study.covariate_dict[cov_name] is not None:\n if ids_for_keys:\n cov_d[study.id] = study.covariate_dict[cov_name]\n else:\n cov_d[study.name] = study.covariate_dict[cov_name]\n return cov_d\n \n def get_cov_names(self):\n return [cov.name for cov in self.covariates]\n \n def add_outcome(self, outcome):\n cur_group_names = self.get_group_names()\n if len(cur_group_names) == 0:\n cur_group_names = None\n \n follow_up = \"first\"\n self.outcome_names_to_follow_ups[outcome.name] = two_way_dict.TwoWayDict()\n self.outcome_names_to_follow_ups[outcome.name][0] = follow_up\n \n for study in self.studies:\n study.add_outcome(outcome, follow_up, group_names=cur_group_names)\n \n def remove_outcome(self, outcome_name):\n if outcome_name is None:\n print(\"Tried to remove a None outcome\")\n return\n self.outcome_names_to_follow_ups.pop(outcome_name)\n for study in self.studies:\n study.remove_outcome(outcome_name)\n \n def add_group(self, group_name, outcome_name, follow_up_name=None):\n ####\n # A note on adding new groups: per consultation with the wise sir\n # Thomas Trikalinos, a decision has been made that when a \n # group is added to an outcome, it is added by default to all\n # the follow ups belonging to said outcome. It is not, however\n # added to all the *outcomes*.\n #\n # However, if the follow_up_name argument is not None, the \n # group will only be added to the specified follow up.\n for study in self.studies:\n cur_outcome = study.outcomes_to_follow_ups[outcome_name]\n if follow_up_name is None:\n for ma_unit in cur_outcome.values():\n ma_unit.add_group(group_name)\n else:\n ma_unit = cur_outcome[follow_up_name]\n ma_unit.add_group(group_name)\n\n print \"added group: %s. cur groups: %s\" % (group_name, self.get_group_names())\n \n def remove_group(self, group_name):\n for study in self.studies:\n for outcome_name in study.outcomes_to_follow_ups.keys():\n cur_outcome = study.outcomes_to_follow_ups[outcome_name]\n for ma_unit in cur_outcome.values():\n ma_unit.remove_group(group_name)\n print \"removed group: %s. 
cur groups: %s\" % (group_name, self.get_group_names())\n \n def add_follow_up(self, follow_up_name):\n ''' adds the follow-up to *all* outcomes '''\n for outcome in self.get_outcome_names():\n self.add_follow_up_to_outcome(outcome, follow_up_name)\n \n def remove_follow_up(self, follow_up_name):\n ''' removes the follow-up from *all* outcomes '''\n for outcome in self.get_outcome_names():\n self.remove_follow_up_from_outcome(follow_up_name, outcome)\n \n def add_follow_up_to_outcome(self, outcome_name, follow_up_name):\n outcome = self.get_outcome_obj(outcome_name)\n cur_group_names = self.get_group_names()\n if len(cur_group_names) == 0:\n cur_group_names = None\n \n prev_index = max(self.outcome_names_to_follow_ups[outcome.name].keys())\n next_index = prev_index + 1\n\n self.outcome_names_to_follow_ups[outcome.name][next_index] = follow_up_name\n \n for study in self.studies:\n study.add_follow_up_to_outcome(outcome, follow_up_name, group_names = cur_group_names)\n \n def remove_follow_up_from_outcome(self, follow_up_name, outcome_name):\n time_point = self.outcome_names_to_follow_ups[outcome_name].get_key(follow_up_name)\n \n self.outcome_names_to_follow_ups[outcome_name].pop(time_point)\n for study in self.studies:\n study.remove_follow_up_from_outcome(outcome_name, follow_up_name)\n \n def get_group_names(self):\n group_names = []\n for study in self.studies:\n for outcome_name in study.outcomes_to_follow_ups.keys():\n cur_outcome = study.outcomes_to_follow_ups[outcome_name]\n for ma_unit in cur_outcome.values():\n group_names.extend(ma_unit.get_group_names())\n return list(set(group_names))\n\n def get_group_names_for_outcome_fu(self, outcome_name, follow_up):\n group_names = []\n for study in self.studies:\n print study.name\n if study.outcomes_to_follow_ups.has_key(outcome_name):\n if study.outcomes_to_follow_ups[outcome_name].has_key(follow_up):\n cur_ma_unit = study.outcomes_to_follow_ups[outcome_name][follow_up]\n group_names.extend(cur_ma_unit.get_group_names())\n return list(set(group_names))\n \n def change_follow_up_name(self, outcome, old_name, new_name):\n # make sure that the follow up doesn't already exist\n if new_name in self.get_follow_up_names_for_outcome(outcome):\n raise Exception, \"follow up name %s alerady exists for outcome!\" % new_name\n for study in self.studies:\n study.outcomes_to_follow_ups[outcome][new_name] = study.outcomes_to_follow_ups[outcome].pop(old_name)\n # also update the outcomes -> follow-ups dictionary\n follow_up_key= self.outcome_names_to_follow_ups[outcome].get_key(old_name)\n self.outcome_names_to_follow_ups[outcome][follow_up_key] = new_name\n\n def get_follow_up_names(self):\n ''' returns *all* known follow-up names '''\n follow_up_names = []\n ## iterate over each outcome\n for outcome_d in self.outcome_names_to_follow_ups.values():\n follow_up_names.extend(outcome_d.values())\n return list(set(follow_up_names))\n \n def get_study_names(self):\n return [study.name for study in self.studies]\n\n def get_follow_up_names_for_outcome(self, outcome):\n return self.outcome_names_to_follow_ups[outcome].values()\n\n def get_network(self, outcome, time_point):\n node_list = [] # list of all nodes\n adjacency_list = [] # list of edges\n for study in self.studies:\n ma_unit = study.outcomes_to_follow_ups[outcome][time_point]\n group_names = ma_unit.get_group_names()\n for g1 in group_names:\n node_list.append(g1)\n for g2 in [group for group in group_names if group != g1]: \n if self.ma_unit_has_edge_between_groups(ma_unit, [g1, g2]) and\\\n not 
(g1, g2) in adjacency_list and not (g2, g1) in adjacency_list:\n adjacency_list.append((g1,g2)) \n\n return (list(set(node_list)), adjacency_list)\n \n def ma_unit_has_edge_between_groups(self, ma_unit, groups):\n # first check the effects. if *any* effect contains data\n # comparing these two groups, we return true.\n comp_str = \"-\".join(groups)\n for effect in ma_unit.get_effect_names():\n comp_str_present = comp_str in ma_unit.get_group_strings(effect)\n # fix for issue where for some reason we were trying to get\n # estimates from one-arm effects (nonsensical)\n try:\n est_not_None = ma_unit.get_estimate(effect, comp_str) is not None\n except KeyError:\n est_not_None = False\n if comp_str_present and est_not_None:\n return True\n\n # now check if they all have raw data\n for group in groups:\n if \"\" in ma_unit.get_raw_data_for_group(group):\n return False\n return True\n \n def cmp_studies(self, compare_by=\"name\", reverse=True, ordered_list=None, directions_to_ma_unit=None, mult=None):\n '''\n compare studies in various ways -- pass the returned function\n to the (built-in) sort function.\n\n compare_by is either 'name', 'year' or 'ordered list'; if it's anything else,\n we assume it's a covariate and sort by that. ordered_list allows\n you to sort arbitrarily in the order specified by the list.\n '''\n \n\n \n # Assign stuff conditionally\n def val_if_key_in_dict(key, dictionary):\n if key in dictionary:\n return dictionary[key]\n else:\n return None\n \n if directions_to_ma_unit is not None:\n keys = ['outcome_name','follow_up','current_groups','data_index',\n 'current_effect','group_str','outcome_type']\n (outcome_name, follow_up, current_groups, data_index, current_effect,\n group_str, outcome_type) = [val_if_key_in_dict(x,directions_to_ma_unit) for x in keys]\n \n if compare_by == \"name\":\n return lambda study_a, study_b : self._meta_cmp_wrapper(study_a, study_b,\\\n study_a.name, study_b.name, reverse)\n elif compare_by == \"year\":\n return lambda study_a, study_b : self._meta_cmp_wrapper(study_a, study_b, study_a.year, \\\n study_b.year, reverse)\n elif compare_by == 'raw_data':\n def f(study_a, study_b):\n ma_unit_A = study_a.get_ma_unit(outcome_name,follow_up)\n ma_unit_B = study_b.get_ma_unit(outcome_name,follow_up)\n raw_data_A = ma_unit_A.get_raw_data_for_groups(current_groups)\n raw_data_B = ma_unit_B.get_raw_data_for_groups(current_groups)\n study_a_val = raw_data_A[data_index]\n study_b_val = raw_data_B[data_index]\n return self._meta_cmp_wrapper(study_a, study_b, study_a_val, study_b_val, reverse)\n return f\n elif compare_by == 'outcomes':\n if mult is None:\n raise ValueError(\"mult must be specified\")\n \n def f(study_a, study_b):\n ma_unit_A = study_a.get_ma_unit(outcome_name,follow_up)\n ma_unit_B = study_b.get_ma_unit(outcome_name,follow_up)\n \n if outcome_type is BINARY:\n to_display_scale = lambda x: meta_py_r.binary_convert_scale(x, current_effect, convert_to=\"display.scale\")\n elif outcome_type is CONTINUOUS:\n to_display_scale = lambda x: meta_py_r.continuous_convert_scale(x, current_effect, convert_to=\"display.scale\")\n elif outcome_type is DIAGNOSTIC:\n to_display_scale = lambda x: meta_py_r.diagnostic_convert_scale(x, current_effect, convert_to=\"display.scale\")\n \n if outcome_type in (BINARY,CONTINUOUS):\n outcome_data_A = ma_unit_A.get_effect_and_ci(current_effect, group_str, mult)\n outcome_data_B = ma_unit_B.get_effect_and_ci(current_effect, group_str, mult) \n outcome_data_A = [to_display_scale(c_val) for c_val in outcome_data_A]\n 
outcome_data_B = [to_display_scale(c_val) for c_val in outcome_data_B]\n elif outcome_type == DIAGNOSTIC:\n # /\\/\\/\\/\\\n (outcome_data_A, outcome_data_B) = ([],[])\n # |\n # \\_____/\n for diag_metric in [\"Sens\",\"Spec\"]: # this order corresponds to the order displayed on the spreadsheet\n est_and_ci_A = ma_unit_A.get_effect_and_ci(diag_metric, group_str, mult)\n est_and_ci_B = ma_unit_B.get_effect_and_ci(diag_metric, group_str, mult)\n est_and_ci_A = [to_display_scale(c_val) for c_val in est_and_ci_A]\n est_and_ci_B = [to_display_scale(c_val) for c_val in est_and_ci_B]\n \n outcome_data_A.extend(est_and_ci_A)\n outcome_data_B.extend(est_and_ci_B)\n study_a_val = outcome_data_A[data_index]\n study_b_val = outcome_data_B[data_index]\n \n return self._meta_cmp_wrapper(study_a, study_b, study_a_val, study_b_val, reverse)\n return f\n \n elif compare_by == \"ordered_list\":\n # then just use the list order\n return lambda study_a, study_b : self._meta_cmp_wrapper(study_a, study_b, \\\n ordered_list.index(study_a.name), \\\n ordered_list.index(study_b.name), \\\n reverse=False)\n else:\n # then we assume that we're sorting by a covariate\n # always want missing values at the 'bottom'\n missing_val = float(\"-infinity\") if reverse else float(\"infinity\")\n missing_to_zero = lambda d, s : d[s] if s in d else missing_val\n\n return lambda study_a, study_b : self._meta_cmp_wrapper(study_a,\\\n study_b,\\\n missing_to_zero(study_a.covariate_dict, compare_by), \\\n missing_to_zero(study_b.covariate_dict, compare_by), \\\n reverse)\n \n\n def _both_empty(self, a, b):\n return a in EMPTY_VALS and b in EMPTY_VALS\n \n def _meta_cmp_wrapper(self, study_a, study_b, study_a_val, study_b_val, reverse):\n '''\n This is a bit kludgey -- we wrap the cmp wrapper in cases where the study names are not\n being compared. This is to avoid comparisons of two empty values. For example, if we are \n sorting by a covariate, and it is empty in two studies, we want to then sort these studies by \n their names. \n '''\n if self._both_empty(study_a_val, study_b_val):\n # both values being compared are empty; sort by study names\n return self._cmp_wrapper(study_a.name, study_b.name, reverse)\n else:\n # at least one has a value; proceed as usual.\n return self._cmp_wrapper(study_a_val, study_b_val, reverse)\n \n def _cmp_wrapper(self, study_a_val, study_b_val, reverse):\n '''\n Wraps the default compare method to assert that \"\" (i.e., empty studies)\n are greater than non-empties\n '''\n flip_sign = -1 if reverse else 1\n if study_a_val in EMPTY_VALS: \n return flip_sign*1\n elif study_b_val in EMPTY_VALS:\n return flip_sign*-1\n else:\n return cmp(study_a_val, study_b_val)\n\n \nclass Study:\n '''\n This class represents a study. It basically holds a \n list of of meta-analytic units, on which analyses can\n be performed, and some meta-data (e.g., study name)\n '''\n def __init__(self, id, name=\"\", year=None, include=True):\n # TODO should fiddle with the include field here. 
\n # when a study is auto-added, it should be excluded\n # until there is sufficient data\n self.id = id\n self.year = year\n self.name= name\n\n self.N = None\n self.notes = \"\"\n # this dictionary maps outcome names to dictionaries\n # which in turn map follow up ids to MetaAnalyticUnit \n # objects.\n self.outcomes_to_follow_ups = {}\n # also maintain a list of the known outcome objects\n self.outcomes = []\n # whether or not this study will be included in any\n # conducted analyses\n self.include = include\n # an empty dictionary that will map covariate names\n # to their values for *this* study.\n self.covariate_dict = {}\n self.manually_excluded = False\n \n def __str__(self):\n return self.name\n \n def get_ma_unit(self, outcome, follow_up,):\n try:\n return self.outcomes_to_follow_ups[outcome][follow_up]\n except:\n raise Exception, \"You're trying to access an ma_unit that doesn't exist\"\n\n def add_outcome(self, outcome, follow_up_name=\"first\", group_names=None):\n ''' Adds a new, blank outcome (i.e., no raw data) '''\n if outcome.name in self.outcomes_to_follow_ups.keys():\n raise Exception, \"Study already contains an outcome named %s\" % outcome.name\n self.outcomes_to_follow_ups[outcome.name] = {}\n self.outcomes_to_follow_ups[outcome.name][follow_up_name] = \\\n MetaAnalyticUnit(outcome, group_names=group_names)\n self.outcomes.append(outcome)\n \n def remove_outcome(self, outcome_name):\n self.outcomes_to_follow_ups.pop(outcome_name)\n for outcome in self.outcomes:\n if outcome.name == outcome_name:\n self.outcomes.remove(outcome)\n \n def add_outcome_at_follow_up(self, outcome, follow_up):\n self.outcomes_to_follow_ups[outcome.name][follow_up] = MetaAnalyticUnit(outcome)\n \n def get_outcome(self, outcome_name):\n for outcome in self.outcomes:\n if outcome.name == outcome_name:\n return outcome\n return None\n \n def get_outcome_names(self):\n return [outcome.name for outcome in self.outcomes]\n \n def add_follow_up_to_outcome(self, outcome, follow_up_name, group_names=None):\n self.outcomes_to_follow_ups[outcome.name][follow_up_name] = \\\n MetaAnalyticUnit(outcome, group_names=group_names)\n \n def remove_follow_up_from_outcome(self, outcome, follow_up_name):\n outcome_name = outcome\n if isinstance(outcome, Outcome):\n outcome_name = outcome.name\n\n self.outcomes_to_follow_ups[outcome_name].pop(follow_up_name)\n \n def add_ma_unit(self, unit, follow_up):\n if not unit.outcome in self.outcomes_to_follow_ups:\n self.add_outcome(unit.outcome, follow_up)\n\n self.outcomes_to_follow_ups[unit.outcome.name][follow_up] = unit\n\n \nclass MetaAnalyticUnit:\n '''\n This class is the unit of analysis. It corresponds to a single\n time period for a particular outcome for a dataset. Note that\n it (may) contain multiple groups!\n '''\n \n def __init__(self, outcome, raw_data=None, group_names=None):\n '''\n Instantiate a new MetaAnalyticUnit, which is specific to a \n given study/outcome pair. 
\n \n @params:\n ===\n outcome -- Outcome object, this tells us what sort of data type\n we have\n raw_data -- If provided, it is assumed to be a nested list, where\n the first sublist is the raw data (num_events, num_total) \n for the treated group and the second corresponds to the\n control group (if applicable)\n '''\n # diagnostic outcome?\n self.is_diag = outcome.data_type == DIAGNOSTIC\n self.outcome = outcome\n\n if group_names is None and not self.is_diag:\n group_names = meta_globals.DEFAULT_GROUP_NAMES\n elif group_names is None:\n group_names = [\"test 1\"]\n\n # TreatmentGroup ids to effect scalars.\n self.tx_groups = {}\n \n self.raw_data_length = 0\n if outcome.data_type == BINARY:\n self.raw_data_length = 2\n elif outcome.data_type == CONTINUOUS:\n self.raw_data_length = 3\n elif outcome.data_type == DIAGNOSTIC:\n self.raw_data_length = 4\n else:\n raise Exception, \"Unrecognized outcome data type, '%s' was given\" % outcome.data_type\n \n # Makes list of (empty lists of length of raw_data): \n raw_data = raw_data or \\\n [[\"\" for n in range(self.raw_data_length)] for group in group_names]\n\n self.effects_dict = {}\n \n # now we initialize the outcome dictionaries.\n if self.outcome.data_type == BINARY:\n for effect in meta_globals.BINARY_TWO_ARM_METRICS + meta_globals.BINARY_ONE_ARM_METRICS:\n self.effects_dict[effect]={}\n elif self.outcome.data_type == CONTINUOUS:\n # note right now we only have mean difference and standardized mean difference\n # @TODO hedge's G, cohen's D, glass delta; WV doesn't implement these\n for effect in meta_globals.CONTINUOUS_TWO_ARM_METRICS + meta_globals.CONTINUOUS_ONE_ARM_METRICS:\n self.effects_dict[effect]={} \n elif self.outcome.data_type == DIAGNOSTIC:\n for effect in meta_globals.DIAGNOSTIC_METRICS:\n self.effects_dict[effect]={}\n \n # add the two default groups: treatment and control; note that the raw data\n # is held at the *group* level\n for i, group in enumerate(group_names):\n self.add_group(group)\n self.tx_groups[group].raw_data = raw_data[i]\n \n def get_init_effect_d(self):\n # these are the dictionaries that actually hold the effects (estimate, \n # CI, etc.). note: *always* copy this dictionary, never use it directly.\n return {\"est\":None,\n \"lower\":None,\n \"upper\":None,\n \"SE\":None,\n \"display_est\":None,\n \"display_lower\":None,\n \"display_upper\":None,\n } \n \n def update_effects_dict_with_group(self, new_group):\n '''\n When a new group is added, the effects dictionary will not contain\n entries for it. Thus this method must be called to update the dictionary\n with keys corresponding to this group (for one-arm metrics) and \n keys corresponding to pairwise combinations of this with other groups.\n '''\n\n group_names = self.tx_groups.keys() # existing groups\n if self.outcome.data_type == BINARY:\n # we assume that an entry for each effect already exists!\n for effect in meta_globals.BINARY_TWO_ARM_METRICS:\n for group in group_names:\n # Note that effect sizes that are entered directly\n # must correspond to a particular *pair* of tx groups, moreover the \n # order matters i.e., the effect for tx a v. tx b is different than the reverse.\n # We take care of this by mapping strings `txA-txB` to effect dictionaries\n groups_str = \"-\".join((new_group, group))\n self.effects_dict[effect][groups_str] = self.get_init_effect_d()\n # ... 
and the reverse (see above comment)\n groups_str = \"-\".join((group, new_group))\n self.effects_dict[effect][groups_str] = self.get_init_effect_d()\n for effect in meta_globals.BINARY_ONE_ARM_METRICS:\n self.effects_dict[effect][new_group] = self.get_init_effect_d()\n elif self.outcome.data_type == CONTINUOUS:\n for effect in meta_globals.CONTINUOUS_TWO_ARM_METRICS:\n for group in group_names:\n groups_str = \"-\".join((new_group, group))\n self.effects_dict[effect][groups_str] = self.get_init_effect_d()\n # and the reverse\n groups_str = \"-\".join((group, new_group))\n self.effects_dict[effect][groups_str] = self.get_init_effect_d() \n for effect in meta_globals.CONTINUOUS_ONE_ARM_METRICS:\n self.effects_dict[effect][new_group] = self.get_init_effect_d()\n elif self.outcome.data_type == DIAGNOSTIC:\n # diagnostic data\n for effect in meta_globals.DIAGNOSTIC_METRICS:\n self.effects_dict[effect][new_group] = self.get_init_effect_d()\n \n def calculate_SE_if_possible(self, effect, group_str, est=None, lower=None, upper=None, mult=None):\n if mult is None:\n raise ValueError(\"Mult must be specified\")\n \n # get SE\n if est is None:\n est = self.effects_dict[effect][group_str][\"est\"]\n if lower is None:\n lower = self.effects_dict[effect][group_str][\"lower\"]\n if upper is None:\n upper = self.effects_dict[effect][group_str][\"upper\"]\n \n print(\"Using the following values to calculate se:\")\n print(\" (est,lower,upper, mult) = (%s,%s,%s, %s)\" % (str(est),str(lower),str(upper), str(mult)))\n try:\n se = (upper - est)/mult\n except:\n try:\n se = (est - lower)/mult\n except:\n try:\n se = (upper - lower)/(2*mult)\n except:\n se = None \n return se\n \n def set_effect(self, effect, group_str, value):\n self.effects_dict[effect][group_str][\"est\"] = value\n def set_lower(self, effect, group_str, lower):\n self.effects_dict[effect][group_str][\"lower\"] = lower\n def set_upper(self, effect, group_str, upper):\n self.effects_dict[effect][group_str][\"upper\"] = upper\n def set_SE(self, effect, group_str, se):\n self.effects_dict[effect][group_str][\"SE\"] = se\n \n def set_display_effect(self, effect, group_str, value):\n self.effects_dict[effect][group_str][\"display_est\"] = value\n def set_display_lower(self, effect, group_str, lower):\n self.effects_dict[effect][group_str][\"display_lower\"] = lower\n def set_display_upper(self, effect, group_str, upper):\n self.effects_dict[effect][group_str][\"display_upper\"] = upper\n # Should this exist?\n def set_display_se(self, effect, group_str, se):\n self.effects_dict[effect][group_str][\"display_se\"] = se\n \n def calculate_display_effect_and_ci(self, effect, group_str, convert_to_display_scale, conf_level=None, mult=None, check_if_necessary=False, n1=None):\n if None in [conf_level, mult]:\n raise ValueError(\"confidence level & mult must be specified\")\n \n # only runs if it is necessary to do so\n if check_if_necessary:\n if self._should_calculate_display_effect_and_ci_and_se(effect, group_str, conf_level):\n pass # just continue and do the calculation\n else:\n #print(\"We don't have to recalculate display_effect and ci-->leaving\")\n return # we don't have to recalculate so exit\n \n if convert_to_display_scale is None:\n # This shouldn't happen.... 
debug it!\n #pyqtRemoveInputHook()\n #pdb.set_trace()\n raise Exception(\"calculate_display_effect_and_ci(ma_dataset): convert_to_display_scale is None!\")\n \n '''Calculates display effect and ci and stores the results in the\n various 'display_' variables '''\n est, lower, upper = self.get_effect_and_ci(effect, group_str, mult)\n d_est, d_lower, d_upper = [convert_to_display_scale(x) for x in [est, lower, upper]]\n se = self.get_se(effect, group_str, mult)\n d_se = se\n #d_se = convert_to_display_scale(se) # this doesn't mean anything...i suppose its just to check to see if we have an se value\n \n print(\"results of calculating display effect and ci: (est,low,high,se(calc scale): %s, %s, %s, %s\" % (d_est,d_lower,d_upper,se))\n \n self.set_display_effect(effect, group_str, d_est)\n self.set_display_lower(effect, group_str, d_lower)\n self.set_display_upper(effect, group_str, d_upper)\n self.set_display_se(effect, group_str, d_se)\n self.effects_dict[effect][group_str][\"display_conf_level\"] = conf_level\n \n \n def get_display_effect(self, effect, group_str):\n try:\n if \"display_est\" in self.effects_dict[effect][group_str]:\n return self.effects_dict[effect][group_str][\"display_est\"]\n else:\n return None\n except:\n pyqtRemoveInputHook()\n pdb.set_trace()\n \n def get_display_lower(self, effect, group_str):\n if \"display_lower\" in self.effects_dict[effect][group_str]:\n return self.effects_dict[effect][group_str][\"display_lower\"]\n else:\n return None \n def get_display_upper(self, effect, group_str):\n if \"display_upper\" in self.effects_dict[effect][group_str]:\n return self.effects_dict[effect][group_str][\"display_upper\"]\n else:\n return None\n def get_display_se(self, effect, group_str):\n if \"display_se\" in self.effects_dict[effect][group_str]:\n return self.effects_dict[effect][group_str][\"display_se\"]\n else:\n return None\n \n \n \n \n def get_display_effect_and_ci(self, effect, group_str, convert_to_display_scale=None):\n return (self.get_display_effect(effect, group_str),\n self.get_display_lower(effect, group_str),\n self.get_display_upper(effect, group_str))\n \n def get_display_effect_and_se(self, effect, group_str, convert_to_display_scale=None):\n return (self.get_display_effect(effect, group_str),\n self.get_display_se(effect, group_str))\n \n def _should_calculate_display_effect_and_ci_and_se(self, effect, group_str, conf_level=None):\n if conf_level is None:\n raise ValueError(\"Confidence level must be specified\")\n \n existing_display_conf_level = \"display_conf_level\" in self.effects_dict[effect][group_str].keys()\n if existing_display_conf_level:\n display_cl = self.effects_dict[effect][group_str][\"display_conf_level\"] # conf level @ which display values were computed\n disp_cl_eq_global_cl = meta_globals.equal_close_enough(\n display_cl,\n conf_level)\n if disp_cl_eq_global_cl:\n result = False # we are ok, don't have to do anything special\n else:\n result = True\n else:\n result = True\n return result\n \n def get_estimate(self, effect, group_str):\n if \"est\" in self.effects_dict[effect][group_str]:\n return self.effects_dict[effect][group_str][\"est\"]\n else:\n return None\n\n \n \n def get_lower(self, effect, group_str, mult): \n return self._helper_get_upper_lower(\"lower\", effect, group_str, mult)\n \n def get_upper(self, effect, group_str, mult): \n return self._helper_get_upper_lower(\"upper\", effect, group_str, mult)\n \n def _helper_get_upper_lower(self, boundary, effect, group_str, mult=None):\n if mult is None:\n raise 
ValueError(\"Mult must be specified\")\n \n if boundary not in [\"upper\",\"lower\"]:\n raise Exception(\"Boundary must be one of 'upper' or 'lower'\")\n \n if self.get_se(effect, group_str, mult) is None:\n return self.effects_dict[effect][group_str][boundary]\n est = self.get_estimate(effect, group_str)\n se = self.get_se(effect, group_str, mult)\n if est is None or se is None:\n return None\n if boundary == \"lower\":\n return (est-mult*se)\n elif boundary == \"upper\":\n return (est+mult*se)\n else:\n raise Exception(\"BOUNDARY NOT RECOGNIZED\")\n \n \n def get_se(self, effect, group_str, mult):\n if \"SE\" in self.effects_dict[effect][group_str]:\n print(\"SE found: %s\" % str(self.effects_dict[effect][group_str][\"SE\"]))\n se = self.effects_dict[effect][group_str][\"SE\"]\n if se is None:\n new_se = self.calculate_SE_if_possible(effect, group_str, mult=mult)\n print(\"new se is %s\" % str(new_se))\n return new_se\n return se\n else:\n #return None\n return self.calculate_SE_if_possible(effect, group_str, mult=mult)\n \n def set_effect_and_ci(self, effect, group_str, est, lower, upper, mult):\n '''also calculated se if possible '''\n \n self.set_effect(effect, group_str, est)\n self.effects_dict[effect][group_str][\"lower\"] = lower\n self.effects_dict[effect][group_str][\"upper\"] = upper\n \n se = self.calculate_SE_if_possible(effect, group_str, est, lower, upper, mult=mult)\n self.set_SE(effect, group_str, se)\n \n def get_effect_and_ci(self, effect, group_str, mult):\n return (self.get_estimate(effect, group_str),\n self.get_lower(effect, group_str, mult),\n self.get_upper(effect, group_str, mult),\n )\n def get_effect_and_se(self, effect, group_str, mult):\n return (self.get_estimate(effect, group_str),\n self.get_se(effect, group_str, mult),\n )\n \n def get_entered_effect_and_ci(self, effect, group_str):\n return (self.effects_dict[effect][group_str][\"est\"],\n self.effects_dict[effect][group_str][\"lower\"],\n self.effects_dict[effect][group_str][\"upper\"],)\n \n def get_effect_dict(self, effect, group_str):\n return self.effects_dict[effect][group_str]\n \n def get_group_strings(self, effect):\n return self.effects_dict[effect].keys()\n \n def get_effects_dict(self):\n ''' Be careful with using this because this returns the actual effects\n dict, not a copy '''\n return self.effects_dict\n \n def get_effect_names(self):\n return self.effects_dict.keys()\n \n def type(self):\n return self.outcome.data_type\n \n def add_group(self, name, raw_data=None):\n if len(self.tx_groups.keys()) == 0:\n grp_id = 0\n else:\n grp_id = max([group.id for group in self.tx_groups.values()]) + 1\n if raw_data is None:\n raw_data = [\"\" for x in range(self.raw_data_length)]\n # Here we add this group to the set of group keys --\n # see inline documentation in this method for details\n self.update_effects_dict_with_group(name)\n self.tx_groups[name] = TreatmentGroup(grp_id, name, raw_data)\n \n \n def remove_group(self, name):\n self.tx_groups.pop(name)\n \n def rename_group(self, old_name, new_name):\n self.tx_groups[new_name] = self.tx_groups[old_name]\n self.tx_groups.pop(old_name)\n\n ##\n # also need to deal with the strings for outcome data\n # i.e., issue #112\n keys_to_pop = [] # keep track of antiquated group names to be removed\n for effect in list(self.effects_dict.keys()):\n for group_str in list(self.effects_dict[effect]):\n if old_name in group_str:\n str_changed = False\n cur_group_names = group_str.split(\"-\")\n cur_vals = self.effects_dict[effect][group_str]\n 
updated_group_strs = []\n for cur_group_name in cur_group_names:\n if cur_group_name == old_name:\n updated_group_strs.append(new_name)\n str_changed=True\n else:\n updated_group_strs.append(cur_group_name)\n \n # if the string changed, then we pop \n # the old version and add the new\n if str_changed:\n new_str = \"-\".join(updated_group_strs)\n self.effects_dict[effect][new_str] = self.effects_dict[effect][group_str]\n keys_to_pop.append(group_str)\n\n # now remove any antiquated group names from the effects dictionary\n for old_group_name in keys_to_pop:\n if old_group_name in self.effects_dict[effect]:\n self.effects_dict[effect].pop(old_group_name) \n\n\n \n def get_raw_data_for_group(self, group_name):\n return self.tx_groups[group_name].raw_data\n \n def set_raw_data_for_group(self, group_name, raw_data):\n self.tx_groups[group_name].raw_data = raw_data\n \n def get_raw_data_for_groups(self, groups):\n if len(groups) == 1:\n return self.get_raw_data_for_group(groups[0])\n raw_data = []\n for group in groups:\n raw_data.extend(self.get_raw_data_for_group(group))\n return raw_data\n \n def set_raw_data_for_groups(self, groups, raw_data_list):\n # note: raw_data_list should be a *nested list*, where entry\n # i is the raw data for groups[i]. \n for i,group in enumerate(groups):\n self.set_raw_data_for_group(group, raw_data_list[i])\n \n def get_group_names(self):\n return self.tx_groups.keys()\n \n \nclass TreatmentGroup:\n def __init__(self, id, name, raw_data):\n self.id = id\n self.name = name\n self.raw_data = raw_data \n \n \nclass Outcome:\n ''' Holds a few fields that define outcomes. '''\n def __init__(self, name, data_type, links=None, sub_type=None):\n self.name = name\n self.data_type = data_type\n self.links = links\n self.sub_type = sub_type # more specific than just binary, cont, diag, etc.\n \nclass Covariate:\n ''' Meta-data about covariates. '''\n def __init__(self, name, data_type):\n if not data_type in (\"factor\", \"continuous\"):\n raise Exception, \\\n \"covariates need to have associated type factor or continuous; %s was given\" % data_type\n self.name = name\n self.data_type = CONTINUOUS if data_type == \"continuous\" else FACTOR\n \n def get_type_str(self):\n return {CONTINUOUS:\"continuous\", FACTOR:\"factor\"}[self.data_type]\n def get_data_type(self):\n return self.data_type\n\nclass Link:\n pass\n \n\n\n\n \n \n \n" }, { "alpha_fraction": 0.40614888072013855, "alphanum_fraction": 0.40614888072013855, "avg_line_length": 22.719999313354492, "blob_id": "89bf8268d2c2a33ac8c4030cf0b8028e7a3ef8fe", "content_id": "d9d3bcd06fb570f8604936426b9d288f981a82c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 618, "license_type": "no_license", "max_line_length": 138, "num_lines": 25, "path": "/src/R/HSROC/R/f.rij.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "f.rij <-\r\nfunction (l, u, r) \r\n{\r\n if ((is.na(l) == TRUE) | (is.na(u) == TRUE) | (is.na(r) == \r\n TRUE)) {\r\n files.remove()\r\n }\r\n if ((is.na(l) == TRUE) | (is.na(u) == TRUE) | (is.na(r) == \r\n TRUE)) {\r\n cat(paste(\"Unsuitable initial values were provided. 
\"))\r\n stop(\"Please respecify and call HSROC() again.\\n If you're using 'init=NULL' you need just to run the 'HSROC' function again.\\n\")\r\n }\r\n if (r < l) {\r\n b = l\r\n }\r\n else {\r\n if (r > u) {\r\n b = u\r\n }\r\n else {\r\n b = r\r\n }\r\n }\r\n return(b)\r\n}\r\n" }, { "alpha_fraction": 0.2933884263038635, "alphanum_fraction": 0.3181818127632141, "avg_line_length": 13.125, "blob_id": "988efa082388a56e14f1d1606b2422d33bb05135", "content_id": "bfec67c49bee2afdb1947ebd77454774d1aac81a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 242, "license_type": "no_license", "max_line_length": 29, "num_lines": 16, "path": "/src/R/HSROC/R/f.LAMBDA.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "f.LAMBDA <-\r\nfunction (a, borne1, borne2) \r\n{\r\n if (a > borne2) {\r\n b = borne2\r\n }\r\n else {\r\n if (a < borne1) {\r\n b = borne1\r\n }\r\n else {\r\n b = a\r\n }\r\n }\r\n return(b)\r\n}\r\n" }, { "alpha_fraction": 0.5341076850891113, "alphanum_fraction": 0.5407276153564453, "avg_line_length": 43.135459899902344, "blob_id": "5d9412b5126ca21798e90606ea9afd4210ba2286", "content_id": "4c6d8a79d7f9e1f1f46102e6c3a8779179eabe5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 33233, "license_type": "no_license", "max_line_length": 139, "num_lines": 753, "path": "/src/binary_data_form.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "######################################\n# #\n# Byron C. Wallace #\n# George Dietz #\n# CEBM @ Brown #\n# OpenMeta[analyst] ##########################\n# --- #\n# Binary data form module; for flexible entry of dichotomous #\n# outcome data #\n###############################################################\n\nimport copy\nfrom functools import partial\n\nfrom PyQt4.Qt import *\nfrom PyQt4.QtGui import *\n\nimport meta_py_r\nfrom meta_globals import *\nimport calculator_routines as calc_fncs\n\nimport forms.ui_binary_data_form\nimport forms.ui_choose_back_calc_result_form\n\n# this is the maximum size of a residual that we're willing to accept\n# when computing 2x2 data\nTHRESHOLD = 1e-5\n\n\nclass BinaryDataForm2(QDialog, forms.ui_binary_data_form.Ui_BinaryDataForm):\n def __init__(self, ma_unit, cur_txs, cur_group_str, cur_effect, conf_level=None, parent=None):\n super(BinaryDataForm2, self).__init__(parent)\n self.setupUi(self)\n \n if conf_level is None:\n raise ValueError(\"Confidence level must be specified\")\n self.global_conf_level = conf_level\n self.mult=meta_py_r.get_mult_from_r(self.global_conf_level)\n \n self._setup_signals_and_slots()\n \n # Assign stuff\n self.ma_unit = ma_unit\n self.cur_groups = cur_txs\n self.group_str = cur_group_str\n self.cur_effect = cur_effect\n self.entry_widgets = [self.raw_data_table, self.low_txt_box,\n self.high_txt_box, self.effect_txt_box]\n self.text_boxes = [self.low_txt_box, self.high_txt_box, self.effect_txt_box]\n \n self.ci_label.setText(\"{0:.1f}% Confidence Interval\".format(self.global_conf_level))\n self.initialize_form() # initialize all cell to empty items\n self.setup_inconsistency_checking()\n self.undoStack = QUndoStack(self)\n\n #self.setup_clear_button_palettes() # Color for clear_button_pallette\n self._update_raw_data() # ma_unit --> table\n self._populate_effect_data() # make combo boxes for effects\n self.set_current_effect() # fill in current effect data in line edits\n self._update_data_table() # fill in 2x2\n 
self.enable_back_calculation_btn()\n\n def initialize_form(self):\n ''' Initialize all cells to empty items '''\n print(\"Entering initialize_table_items\")\n \n nrows = self.raw_data_table.rowCount()\n ncols = self.raw_data_table.columnCount()\n \n for row in range(nrows):\n for col in range(ncols):\n self._set_val(row, col, None)\n\n for txt_box in self.text_boxes:\n txt_box.setText(QString(\"\"))\n\n# def setup_clear_button_palettes(self):\n# # Color for clear_button_pallette\n# self.orig_palette = self.clear_Btn.palette()\n# self.pushme_palette = QPalette()\n# self.pushme_palette.setColor(QPalette.ButtonText, Qt.red)\n# #self.set_clear_btn_color()\n\n# def set_clear_btn_color(self):\n# if calc_fncs._input_fields_disabled(self.raw_data_table, self.text_boxes):\n# self.clear_Btn.setPalette(self.pushme_palette)\n# else:\n# self.clear_Btn.setPalette(self.orig_palette)\n\n def print_effects_dict_from_ma_unit(self):\n print self.ma_unit.get_effects_dict()\n\n def enable_back_calculation_btn(self, engage=False):\n # For undo/redo\n old_ma_unit, old_table = self._save_ma_unit_and_table_state(\n table = self.raw_data_table,\n ma_unit = self.ma_unit, \n use_old_value=False)\n \n def build_back_calc_args_dict():\n\n d = {}\n d[\"metric\"] = str(self.cur_effect)\n\n est,lower,upper = self.ma_unit.get_effect_and_ci(self.cur_effect, self.group_str, self.mult)\n conv_to_disp_scale = lambda x: meta_py_r.binary_convert_scale(x, self.cur_effect, convert_to=\"display.scale\")\n d_est,d_lower,d_upper = [conv_to_disp_scale(x) for x in [est,lower,upper]]\n for i,R_key in enumerate([\"estimate\", \"lower\", \"upper\"]):\n try:\n d[\"%s\" % R_key] = float([d_est,d_lower,d_upper][i])\n except:\n d[\"%s\" % R_key] = None\n\n d[\"conf.level\"] = self.global_conf_level\n\n d[\"Ev_A\"] = float(self._get_int(0, 0)) if not self._is_empty(0, 0) else None\n d[\"N_A\"] = float(self._get_int(0, 2)) if not self._is_empty(0, 2) else None\n d[\"Ev_B\"] = float(self._get_int(1, 0)) if not self._is_empty(1, 0) else None\n d[\"N_B\"] = float(self._get_int(1, 2)) if not self._is_empty(1, 2) else None\n\n return d\n def new_data(bin_data, imputed):\n changed = False\n old_data = (bin_data[\"Ev_A\"],\n bin_data[\"N_A\"],\n bin_data[\"Ev_B\"],\n bin_data[\"N_B\"])\n new_data = []\n new_data.append((int(round(imputed[\"op1\"][\"a\"])),\n int(round(imputed[\"op1\"][\"b\"])),\n int(round(imputed[\"op1\"][\"c\"])),\n int(round(imputed[\"op1\"][\"d\"])),\n ))\n if \"op2\" in imputed:\n new_data.append((int(round(imputed[\"op2\"][\"a\"])),\n int(round(imputed[\"op2\"][\"b\"])),\n int(round(imputed[\"op2\"][\"c\"])),\n int(round(imputed[\"op2\"][\"d\"])),\n ))\n def new_item_available(old,new):\n isBlank = lambda x: x in EMPTY_VALS\n no_longer_blank = isBlank(old) and not isBlank(new)\n return no_longer_blank\n comparison0 = [new_item_available(old_data[i], new_data[0][i]) for i in range(len(old_data))]\n new_data_in_op1 = any(comparison0)\n print(\"Comparison0:\", comparison0)\n\n if new_data_in_op1:\n changed = True\n if \"op2\" in imputed:\n comparison1 = [new_item_available(old_data[i], new_data[1][i]) for i in range(len(old_data))]\n print(\"Comparison1:\", comparison1)\n new_data_in_op2 = any(comparison1)\n if not new_data_in_op2:\n changed = False\n else:\n changed = False\n\n return changed\n ### end of new_data() definition ####\n\n # Makes no sense to show the button on a form where the back\n # calculation is not implemented\n if not self.cur_effect in [\"OR\", \"RR\", \"RD\"]:\n self.back_calc_btn.setVisible(False)\n return 
None\n else:\n self.back_calc_btn.setVisible(True)\n\n bin_data = build_back_calc_args_dict()\n print(\"Binary data for back-calculation:\", bin_data)\n\n imputed = meta_py_r.impute_bin_data(bin_data.copy())\n print(\"Imputed data: %s\", imputed)\n\n # Leave if nothing was imputed\n if \"FAIL\" in imputed:\n print(\"Fail to impute\")\n self.back_calc_btn.setEnabled(False)\n return None\n\n if new_data(bin_data, imputed):\n self.back_calc_btn.setEnabled(True)\n else:\n self.back_calc_btn.setEnabled(False)\n\n #self.set_clear_btn_color()\n\n if not engage:\n return None\n ########################################################################\n # Actually do stuff with imputed data here if we are 'engaged'\n ########################################################################\n for x in range(3):\n self.clear_column(x) # clear out the table\n\n if len(imputed.keys()) > 1:\n dialog = ChooseBackCalcResultForm(imputed, parent=self)\n if dialog.exec_():\n choice = dialog.getChoice()\n else: # don't do anything if cancelled\n return None\n else: # only one option\n choice = \"op1\"\n\n # set values in table & save in ma_unit\n self.raw_data_table.blockSignals(True)\n self._set_val(0, 0, int(round(imputed[choice][\"a\"])))\n self._set_val(0, 2, int(round(imputed[choice][\"b\"])))\n self._set_val(1, 0, int(round(imputed[choice][\"c\"])))\n self._set_val(1, 2, int(round(imputed[choice][\"d\"])))\n self.raw_data_table.blockSignals(False)\n\n self._update_data_table()\n self._update_ma_unit() # save in ma_unit\n #self.set_clear_btn_color()\n \n # for undo/redo\n new_ma_unit, new_table = self._save_ma_unit_and_table_state(\n table = self.raw_data_table, ma_unit = self.ma_unit,\n use_old_value=False)\n restore_old_f = lambda: self.restore_ma_unit_and_table(old_ma_unit, old_table)\n restore_new_f = lambda: self.restore_ma_unit_and_table(new_ma_unit, new_table)\n command = calc_fncs.CommandFieldChanged(restore_new_f=restore_new_f, restore_old_f=restore_old_f, parent=self)\n self.undoStack.push(command)\n\n def setup_inconsistency_checking(self):\n # set-up inconsistency label\n inconsistency_palette = QPalette()\n inconsistency_palette.setColor(QPalette.WindowText, Qt.red)\n self.inconsistencyLabel.setPalette(inconsistency_palette)\n self.inconsistencyLabel.setVisible(False)\n\n def action_consistent_table():\n self.inconsistencyLabel.setVisible(False)\n self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(True)\n def action_inconsistent_table():\n # show label, disable OK buttonbox button\n self.inconsistencyLabel.setVisible(True)\n self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)\n\n self.check_table_consistency = calc_fncs.ConsistencyChecker(\n fn_consistent=action_consistent_table,\n fn_inconsistent=action_inconsistent_table,\n table_2x2=self.raw_data_table)\n\n @pyqtSignature(\"int, int, int, int\")\n def on_raw_data_table_currentCellChanged(self, currentRow, currentColumn, previousRow, previousColumn):\n self.current_item_data = self._get_int(currentRow, currentColumn)\n print \"Current Item Data:\", self.current_item_data \n\n def _setup_signals_and_slots(self):\n QObject.connect(self.raw_data_table, SIGNAL(\"cellChanged(int,int)\"), self.cell_changed)\n\n QObject.connect(self.effect_cbo_box, SIGNAL(\"currentIndexChanged(QString)\"), self.effect_changed)\n QObject.connect(self.clear_Btn, SIGNAL(\"clicked()\"), self.clear_form)\n QObject.connect(self.back_calc_btn, SIGNAL(\"clicked()\"), lambda: self.enable_back_calculation_btn(engage=True))\n\n QObject.connect(self.effect_txt_box, 
SIGNAL(\"editingFinished()\"), lambda: self.val_changed(\"est\"))\n QObject.connect(self.low_txt_box, SIGNAL(\"editingFinished()\"), lambda: self.val_changed(\"lower\"))\n QObject.connect(self.high_txt_box, SIGNAL(\"editingFinished()\"), lambda: self.val_changed(\"upper\"))\n\n # Add undo/redo actions\n undo = QAction(self)\n redo = QAction(self)\n undo.setShortcut(QKeySequence.Undo)\n redo.setShortcut(QKeySequence.Redo)\n self.addAction(undo)\n self.addAction(redo)\n QObject.connect(undo, SIGNAL(\"triggered()\"), self.undo)\n QObject.connect(redo, SIGNAL(\"triggered()\"), self.redo)\n\n def _populate_effect_data(self):\n q_effects = sorted([QString(effect_str) for effect_str in self.ma_unit.effects_dict.keys()])\n self.effect_cbo_box.blockSignals(True)\n self.effect_cbo_box.addItems(q_effects)\n self.effect_cbo_box.blockSignals(False)\n self.effect_cbo_box.setCurrentIndex(q_effects.index(QString(self.cur_effect)))\n \n def get_effect_names(self):\n return self.ma_unit.get_effect_names()\n \n def set_current_effect(self):\n '''Fills in text boxes with data from ma unit'''\n\n txt_boxes = dict(effect=self.effect_txt_box, lower=self.low_txt_box, upper=self.high_txt_box)\n calc_fncs.helper_set_current_effect(ma_unit=self.ma_unit,\n txt_boxes=txt_boxes, current_effect=self.cur_effect,\n group_str=self.group_str, data_type=\"binary\",\n mult=self.mult)\n \n self.change_row_color_according_to_metric()\n \n def change_row_color_according_to_metric(self):\n # Change color of bottom rows of table according one or two-arm metric\n curr_effect_is_one_arm = self.cur_effect in BINARY_ONE_ARM_METRICS\n for row in (1,2):\n for col in range(3):\n item = self.raw_data_table.item(row, col)\n if curr_effect_is_one_arm:\n item.setBackground(QBrush(QColor(Qt.gray)))\n else:\n # just reset the item\n text = item.text()\n self.raw_data_table.blockSignals(True)\n popped_item = self.raw_data_table.takeItem(row, col)\n self.raw_data_table.blockSignals(False)\n del popped_item\n self._set_val(row, col, text)\n \n def effect_changed(self):\n '''Called when a new effect is selected in the combo box'''\n \n self.cur_effect = unicode(self.effect_cbo_box.currentText().toUtf8(), \"utf-8\")\n self.group_str = self.get_cur_group_str()\n \n self.try_to_update_cur_outcome()\n self.set_current_effect()\n \n self.enable_txt_box_input()\n self.enable_back_calculation_btn()\n \n def _text_box_value_is_between_bounds(self, val_str, new_text):\n display_scale_val = \"\"\n \n get_disp_scale_val_if_valid = partial(\n calc_fncs.evaluate, new_text=new_text, ma_unit=self.ma_unit,\n curr_effect=self.cur_effect, group_str=self.group_str,\n conv_to_disp_scale = partial(meta_py_r.binary_convert_scale,\n metric_name=self.cur_effect,\n convert_to=\"display.scale\"),\n parent=self,\n mult=self.mult)\n \n calc_fncs.block_signals(self.entry_widgets, True)\n try:\n if val_str == \"est\" and not is_empty(new_text):\n display_scale_val = get_disp_scale_val_if_valid(ci_param='est')\n elif val_str == \"lower\" and not is_empty(new_text):\n display_scale_val = get_disp_scale_val_if_valid(ci_param='low')\n elif val_str == \"upper\" and not is_empty(new_text):\n display_scale_val = get_disp_scale_val_if_valid(ci_param='high')\n except Exception:\n calc_fncs.block_signals(self.entry_widgets, False)\n return False,False\n calc_fncs.block_signals(self.entry_widgets, False)\n print(\"Val_str: %s\" % val_str)\n return True,display_scale_val\n \n \n def _get_txt_from_val_str(self, val_str):\n if val_str == \"est\":\n return str(self.effect_txt_box.text())\n 
elif val_str == \"lower\":\n return str(self.low_txt_box.text())\n elif val_str == \"upper\":\n return str(self.high_txt_box.text())\n return None # should never happen\n \n \n def val_changed(self, val_str):\n # Backup form state\n old_ma_unit, old_table = self._save_ma_unit_and_table_state(\n table = self.raw_data_table,\n ma_unit = self.ma_unit, \n use_old_value=False)\n \n new_text = self._get_txt_from_val_str(val_str)\n \n no_errors, display_scale_val = self._text_box_value_is_between_bounds(val_str, new_text)\n if no_errors is False: # There are errors\n self.restore_ma_unit_and_table(old_ma_unit,old_table)\n calc_fncs.block_signals(self.entry_widgets, True)\n if val_str == \"est\":\n self.effect_txt_box.setFocus()\n elif val_str == \"lower\":\n self.low_txt_box.setFocus()\n elif val_str == \"upper\":\n self.high_txt_box.setFocus()\n calc_fncs.block_signals(self.entry_widgets, False)\n return\n \n # If we got to this point it means everything is ok so far\n try:\n if display_scale_val not in EMPTY_VALS:\n display_scale_val = float(display_scale_val)\n else:\n display_scale_val = None\n except ValueError:\n # a number wasn't entered; ignore\n # should probably clear out the box here, too.\n print \"fail.\"\n return None\n \n calc_scale_val = meta_py_r.binary_convert_scale(display_scale_val,\n self.cur_effect, convert_to=\"calc.scale\")\n \n if val_str == \"est\":\n self.ma_unit.set_effect(self.cur_effect, self.group_str, calc_scale_val)\n elif val_str == \"lower\":\n self.ma_unit.set_lower(self.cur_effect, self.group_str, calc_scale_val)\n elif val_str == \"upper\":\n self.ma_unit.set_upper(self.cur_effect, self.group_str, calc_scale_val)\n \n new_ma_unit, new_table = self._save_ma_unit_and_table_state(\n table = self.raw_data_table, ma_unit = self.ma_unit,\n use_old_value=False)\n restore_old_f = lambda: self.restore_ma_unit_and_table(old_ma_unit, old_table)\n restore_new_f = lambda: self.restore_ma_unit_and_table(new_ma_unit, new_table)\n command = calc_fncs.CommandFieldChanged(restore_new_f=restore_new_f, restore_old_f=restore_old_f, parent=self)\n self.undoStack.push(command)\n \n def _update_raw_data(self):\n ''' Generates the 2x2 table with whatever parametric data was provided '''\n ''' Sets #events and #subjects in binary table'''\n \n print(\"_update_raw_data:\")\n for row, group in enumerate(self.cur_groups):\n for col in (0, 2):\n adjusted_index = 0 if col == 0 else 1\n val = self.ma_unit.get_raw_data_for_group(group)[adjusted_index]\n self._set_val(row, col, val)\n \n def _update_ma_unit(self):\n ''' Copy data from binary data table to the MA_unit'''\n ''' \n Walk over the entries in the matrix (which may have been updated\n via imputation in the cell_changed method) corresponding to the \n raw data in the underlying meta-analytic unit and update the values.\n '''\n for row in range(2):\n for col in (0, 2):\n adjusted_col = 1 if col == 2 else 0\n self.ma_unit.get_raw_data_for_group(self.cur_groups[row])[adjusted_col] = self._get_int(row, col)\n \n print \"%s, %s: %s\" % (row, col, self._get_int(row, col))\n print \"ok -- raw data is now: %s\" % calc_fncs.get_raw_data(self.ma_unit, self.cur_groups)\n \n def _cell_data_not_valid(self, celldata_string):\n # ignore blank entries\n if celldata_string.trimmed() == \"\" or celldata_string is None:\n return None\n\n if not is_a_float(celldata_string):\n return \"Raw data needs to be numeric.\"\n\n if not is_an_int(celldata_string):\n return \"Expecting count data -- you provided a float (?)\"\n\n if int(celldata_string) < 0:\n 
return \"Counts cannot be negative.\"\n return None\n \n def restore_ma_unit(self, old_ma_unit):\n ''' Restores the ma_unit data and resets the form'''\n self.ma_unit.__dict__ = copy.deepcopy(old_ma_unit.__dict__)\n print(\"Restored ma_unit data: %s\" % str(self.ma_unit.get_raw_data_for_groups(self.cur_groups)))\n \n self.initialize_form() # clear form first\n self._update_raw_data()\n self.set_current_effect()\n self._update_data_table()\n self.enable_back_calculation_btn()\n #self.set_clear_btn_color()\n \n \n def restore_table(self, old_table):\n nrows = len(old_table)\n ncols = len(old_table[0])\n \n for row in range(nrows):\n for col in range(ncols):\n self.raw_data_table.blockSignals(True)\n self._set_val(row, col, old_table[row][col])\n self.raw_data_table.blockSignals(False)\n self.check_table_consistency.run()\n \n def restore_ma_unit_and_table(self, old_ma_unit, old_table):\n self.restore_ma_unit(old_ma_unit)\n self.restore_table(old_table)\n \n def _save_ma_unit_and_table_state(self, table, ma_unit, row=None, col=None,\n old_value=None, use_old_value=True):\n # Make backup of table info...\n old_table = calc_fncs.save_table_data(table)\n if use_old_value:\n old_table[row][col] = old_value # ...from BEFORE the cell changed\n \n # Make backup copy of ma_unit\n old_ma_unit = copy.deepcopy(ma_unit)\n return old_ma_unit, old_table\n \n def cell_changed(self, row, col):\n # tries to make sense of user input before passing\n # on to the R routine\n\n old_ma_unit, old_table = self._save_ma_unit_and_table_state(\n table = self.raw_data_table,\n ma_unit = self.ma_unit, \n old_value = self.current_item_data,\n row = row, col = col, use_old_value=True)\n \n try:\n # Test if entered data is valid (a number)\n warning_msg = self._cell_data_not_valid(self.raw_data_table.item(row, col).text())\n if warning_msg:\n raise Exception(\"Invalid Cell Data\")\n \n self._update_data_table() # calculate rest of table (provisionally) based on new entry\n warning_msg = self.check_table_consistency.run()\n if warning_msg:\n raise Exception(\"Table no longer consistent.\")\n except Exception as e:\n msg = e.args[0]\n QMessageBox.warning(self.parent(), \"whoops\", msg) # popup warning\n self.restore_ma_unit_and_table(old_ma_unit,old_table) # brings things back to the way they were\n return # and leave\n \n self._update_ma_unit() # table widget --> ma_unit\n self.try_to_update_cur_outcome() # update metric in ma_unit and in table\n\n new_ma_unit, new_table = self._save_ma_unit_and_table_state(\n table = self.raw_data_table,\n ma_unit = self.ma_unit, \n row = row, col = col, use_old_value = False)\n #restore_f = self.restore_ma_unit_and_table\n #command = calc_fncs.CommandFieldChanged(old_ma_unit, new_ma_unit, old_table, new_table, restore_f=restore_f, parent=self)\n restore_old_f = lambda: self.restore_ma_unit_and_table(old_ma_unit, old_table)\n restore_new_f = lambda: self.restore_ma_unit_and_table(new_ma_unit, new_table)\n command = calc_fncs.CommandFieldChanged(restore_new_f=restore_new_f, restore_old_f=restore_old_f, parent=self) \n self.undoStack.push(command)\n \n \n def _get_table_vals(self):\n ''' Package table from 2x2 table in to a dictionary'''\n \n vals_d = {}\n vals_d[\"c11\"] = self._get_int(0, 0)\n vals_d[\"c12\"] = self._get_int(0, 1)\n vals_d[\"c21\"] = self._get_int(1, 0)\n vals_d[\"c22\"] = self._get_int(1, 1)\n vals_d[\"r1sum\"] = self._get_int(0, 2)\n vals_d[\"r2sum\"] = self._get_int(1, 2)\n vals_d[\"c1sum\"] = self._get_int(2, 0)\n vals_d[\"c2sum\"] = self._get_int(2, 1)\n 
vals_d[\"total\"] = self._get_int(2, 2)\n return vals_d\n \n def clear_column(self, col):\n '''Clears out column in table and ma_unit'''\n\n for row in range(3):\n self.raw_data_table.blockSignals(True)\n self._set_val(row, col, None) \n self.raw_data_table.blockSignals(False)\n \n self._update_ma_unit()\n \n def _set_vals(self, computed_d):\n '''Sets values in table widget'''\n self.raw_data_table.blockSignals(True)\n self._set_val(0, 0, computed_d[\"c11\"])\n self._set_val(0, 1, computed_d[\"c12\"])\n self._set_val(1, 0, computed_d[\"c21\"])\n self._set_val(1, 1, computed_d[\"c22\"]) \n self._set_val(0, 2, computed_d[\"r1sum\"])\n self._set_val(1, 2, computed_d[\"r2sum\"])\n self._set_val(2, 0, computed_d[\"c1sum\"])\n self._set_val(2, 1, computed_d[\"c2sum\"]) \n self._set_val(2, 2, computed_d[\"total\"]) \n self.raw_data_table.blockSignals(False) \n \n def _set_val(self, row, col, val):\n if is_NaN(val): # get out quick\n print \"%s is not a number\" % val\n return\n \n try:\n self.raw_data_table.blockSignals(True)\n str_val = \"\" if val in EMPTY_VALS else str(int(val))\n if self.raw_data_table.item(row, col) == None:\n self.raw_data_table.setItem(row, col, QTableWidgetItem(str_val))\n else:\n self.raw_data_table.item(row, col).setText(str_val)\n print(\" setting (%d,%d) to '%s'\" % (row,col,str_val))\n \n# # disable item\n# if str_val != \"\": \n# item = self.raw_data_table.item(row, col)\n# newflags = item.flags() & ~Qt.ItemIsEditable\n# item.setFlags(newflags)\n \n self.raw_data_table.blockSignals(False)\n except:\n print(\" Got to except in _set_val when trying to set (%d,%d)\" % (row, col))\n raise\n \n def _build_dict(self):\n d = dict(zip([\"control.n.outcome\", \"control.N\", \"tx.n.outcome\", \"tx.N\"], self.raw_data))\n d[\"estimate\"] = self.ma_unit.get_estimate(self.cur_effect, self.group_str)\n return d\n \n def _update_data_table(self): \n '''Fill in 2x2 table from other entries in the table '''\n \n self.raw_data_table.blockSignals(True)\n \n params = self._get_table_vals()\n computed_params = calc_fncs.compute_2x2_table(params)\n print \"Computed Params\", computed_params\n if computed_params:\n self._set_vals(computed_params) # computed --> table widget\n \n self.raw_data_table.blockSignals(False)\n \n def _is_empty(self, i, j):\n val = self.raw_data_table.item(i, j)\n return val is None or val.text() == \"\"\n \n def _get_int(self, i, j):\n '''Get value from cell specified by row=i, col=j as an integer'''\n if not self._is_empty(i, j):\n val = int(float(self.raw_data_table.item(i, j).text()))\n #print(\"Val from _get_int: %d\" % val)\n return val\n else:\n return None # its good to be explicit\n \n def _isBlank(self, x):\n return x is None or x == \"\"\n \n def try_to_update_cur_outcome(self):\n e1, n1, e2, n2 = self.ma_unit.get_raw_data_for_groups(self.cur_groups)\n print(\"e1: %s, n1: %s, e2: %s, n2: %s\" % (str(e1),str(n1),str(e2),str(n2)))\n \n two_arm_raw_data_ok = not any([self._isBlank(x) for x in [e1, n1, e2, n2]])\n one_arm_raw_data_ok = not any([self._isBlank(x) for x in [e1, n1]])\n curr_effect_is_one_arm = self.cur_effect in BINARY_ONE_ARM_METRICS\n curr_effect_is_two_arm = self.cur_effect in BINARY_TWO_ARM_METRICS\n \n # if None is in the raw data, should we clear out current outcome?\n if two_arm_raw_data_ok or (curr_effect_is_one_arm and one_arm_raw_data_ok):\n if curr_effect_is_two_arm:\n est_and_ci_d = meta_py_r.effect_for_study(e1, n1, e2, n2, metric=self.cur_effect, conf_level=self.global_conf_level)\n else:\n # binary, one-arm\n est_and_ci_d = 
meta_py_r.effect_for_study(e1, n1, two_arm=False, metric=self.cur_effect, conf_level=self.global_conf_level)\n \n est, low, high = est_and_ci_d[\"calc_scale\"] # calculation (e.g., log) scale\n self.ma_unit.set_effect_and_ci(self.cur_effect, self.group_str, est, low, high, mult=self.mult)\n self.set_current_effect()\n \n def clear_form(self):\n # For undo/redo\n old_ma_unit, old_table = self._save_ma_unit_and_table_state(\n table = self.raw_data_table,\n ma_unit = self.ma_unit, \n use_old_value=False)\n \n blank_vals = {\"c11\" : \"\",\n \"c12\" : \"\",\n \"r1sum\": \"\",\n \"c21\" : \"\",\n \"c22\" : \"\",\n \"r2sum\": \"\",\n \"c1sum\": \"\",\n \"c2sum\": \"\",\n \"total\": \"\"}\n\n self._set_vals(blank_vals)\n self._update_ma_unit()\n \n # clear out effects stuff\n for metric in BINARY_ONE_ARM_METRICS + BINARY_TWO_ARM_METRICS:\n if ((self.cur_effect in BINARY_TWO_ARM_METRICS and metric in BINARY_TWO_ARM_METRICS) or\n (self.cur_effect in BINARY_ONE_ARM_METRICS and metric in BINARY_ONE_ARM_METRICS)):\n self.ma_unit.set_effect_and_ci(metric, self.group_str, None, None, None, mult=self.mult)\n else:\n # TODO: Do nothing for now..... treat the case where we have to switch group strings down the line\n pass\n \n # clear line edits\n self.set_current_effect()\n calc_fncs.reset_table_item_flags(self.raw_data_table)\n ####self.enable_txt_box_input()\n \n \n new_ma_unit, new_table = self._save_ma_unit_and_table_state(\n table = self.raw_data_table, ma_unit = self.ma_unit,\n use_old_value=False)\n restore_old_f = lambda: self.restore_ma_unit_and_table(old_ma_unit, old_table)\n restore_new_f = lambda: self.restore_ma_unit_and_table(new_ma_unit, new_table)\n command = calc_fncs.CommandFieldChanged(restore_new_f=restore_new_f, restore_old_f=restore_old_f, parent=self)\n self.undoStack.push(command)\n \n def enable_txt_box_input(self):\n # meta_globals.enable_txt_box_input(self.effect_txt_box, self.low_txt_box,\n # self.high_txt_box)\n # print(\"Enabled text box input\")\n pass\n \n def get_cur_group_str(self):\n # Inspired from get_cur_group_str of ma_data_table_model\n \n if self.cur_effect in BINARY_ONE_ARM_METRICS:\n group_str = self.cur_groups[0] \n else:\n group_str = \"-\".join(self.cur_groups)\n return group_str\n \n ####### Undo framework ############\n def undo(self):\n print(\"undoing....\")\n self.undoStack.undo()\n \n def redo(self):\n print(\"redoing....\")\n self.undoStack.redo()\n #################################\n \n \n################################################################################\nclass ChooseBackCalcResultForm(QDialog, forms.ui_choose_back_calc_result_form.Ui_ChooseBackCalcResultForm):\n def __init__(self, imputed_data, parent=None):\n super(ChooseBackCalcResultForm, self).__init__(parent)\n self.setupUi(self)\n \n op1 = imputed_data[\"op1\"] # option 1 data\n a, b, c, d = op1[\"a\"], op1[\"b\"], op1[\"c\"], op1[\"d\"]\n a, b, c, d = int(round(a)), int(round(b)), int(round(c)), int(round(d))\n option1_txt = \"Group 1:\\n #events: %d\\n Total: %d\\n\\nGroup 2:\\n #events: %d\\n Total: %d\" % (a, b, c, d)\n \n op2 = imputed_data[\"op2\"]\n a, b, c, d = op2[\"a\"], op2[\"b\"], op2[\"c\"], op2[\"d\"]\n a, b, c, d = int(round(a)), int(round(b)), int(round(c)), int(round(d))\n option2_txt = \"Group 1:\\n #events: %d\\n Total: %d\\n\\nGroup 2:\\n #events: %d\\n Total: %d\" % (a, b, c, d)\n \n self.choice1_btn.setText(option1_txt)\n self.choice2_btn.setText(option2_txt)\n self.info_label.setText(\"The back-calculation has resulted in two \"\n \"possible sets of choices 
for the counts. Please\"\n \" choose one from below. These choices do not \"\n \"reflect possible corrections for zero counts.\")\n \n self.adjustSize()\n\n def getChoice(self):\n choices = [\"op1\", \"op2\"]\n \n if self.choice1_btn.isChecked():\n return choices[0] # op1\n else:\n return choices[1] # op2" }, { "alpha_fraction": 0.64552241563797, "alphanum_fraction": 0.6497201323509216, "avg_line_length": 52.625, "blob_id": "a1d9e5176653771004a0dbd0428c38dc0fc9fd42", "content_id": "7b5816a53d7b54b50a7484195a2897753e10ddf7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 2144, "license_type": "no_license", "max_line_length": 386, "num_lines": 40, "path": "/doc/create_dataset.html", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "<html>\n<head>\n<link href=\"css/help.css\" \ntype=text/css rel=stylesheet>\n<h2><a id=\"top\">Creating a New Data Set</a></h2>\n<p>To create a new data set in the main Open Meta-Analyst window, do the following steps. \n <ol>\n <li>Select <b>new dataset</b> fon the <b>Dataset</b> menu.\n <p><img src=\"images/dataset_menu.png\"></p>\n </li> \n <li>In the New Dataset dialog, type the name of the data set:\n <p><img src=\"images/dataset_name.png\"></img></p>\n </li>\n <li>Select the type of data you are working with, which can be of the following types:\n <ul>\n <li>Binary or continuous</li>\n <li>Diagnostic</li>\n </ul>\n <p> <img src=\"images/dataset_type.png\"</img></p>\n <p>In this example, the data type is binary. See <a href=\"data_sets.html#top\">Data Sets</a> for description of the data types available in Open Meta-Analyst.</p></li>\n <li> Select <b>create new</b>. \n This opens the <b>add new outcome</b> dialog shown below.\n <p><img \n src=\"images/outcome_dialog.png\"></p></li>\n <li>In the dialog, type a name for the outcome of the treatment and select the \n type of outcome, depending on the type of \n data you are analyzing. </li>\n <li>Click <b>OK</b>.\n </ol>\n <p>This opens the Open Meta-Analyst window. For binary data, the window appears as shown below.</p>\n <p><img \n src=\"images/added_binary_fields.png\"></p>\n <p>The next section shows how to <a href=\"entering_data.html#top\">enter data</a> in the data set.</p>\n <p><b>Note:</b> You can also create a new data set in the Welcome to Open Meta-Analyst dialog by clicking the <b>create new</b> button and following steps similar the procedure above. 
</p>\n <p><a href=\"#top\">Back to top</a></p>\n <br>\n<table id=\"nav\" cellspacing=\"0\"><tr valign=\"center\"><td align=\"left\" width=\"20\"><a href=\"analysis.html#top\"><img src=\"images/b_prev.gif\" border=\"0\"></td><td align=\"left\">Example - Binary Data</td></a><td>&nbsp;</td><td align=\"right\">Entering and Saving Data</td><td align=\"right\" width=\"20\"><a href=\"entering_data.html#top\"><img src=\"images/b_next.gif\" border=\"0\"></a></td></tr></table>\n<br>\n</body>\n</html>" }, { "alpha_fraction": 0.46813881397247314, "alphanum_fraction": 0.5167192220687866, "avg_line_length": 34.24444580078125, "blob_id": "e98b3d4a817836d2a39335bd763fda86bf8af2a8", "content_id": "32b92cf94fd3296ccd9e95f4048f520250ab97fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1585, "license_type": "no_license", "max_line_length": 96, "num_lines": 45, "path": "/src/R/install.r", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "R.version <- packageDescription(\"base\")[\"Version\"]\nvalid.versions <- c(\"2.9.0\", \"2.9.1\", \"2.9.2\", \"2.10.0\", \"2.10.1\", \"2.11.0\", \"2.11.1\", \"2.12.0\")\nif (is.element(R.version, valid.versions)) {\n start_dir <- getwd()\n packages <- installed.packages()\n package.names <- names(packages[,1])\n type <- getOption(\"pkgType\")\n local.pkgs <- NULL\n repo.pkgs <- NULL\n \n if (!is.element(\"igraph\", package.names)) {\n repo.pkgs <- c(\"igraph\")\n }\n if (!is.element(\"metafor\", package.names)) {\n if (is.element(R.version, c(\"2.10.0\", \"2.10.1\", \"2.11.0\", \"2.11.1\"))) {\n repo.pkgs <- c(repo.pkgs, \"metafor\")\n } else {\n if (type == \"win.binary\") {\n local.pkgs <- c(local.pkgs, \"metafor_0.5-7.zip\")\n }\n if (type == \"source\") {\n local.pkgs <- c(local.pkgs, \"metafor_0.5-7.tar.gz\")\n }\n }\n } \n if (!is.element(\"openmetar\", package.names)) {\n if (type == \"win.binary\") {\n local.pkgs <- c(local.pkgs, \"openmetar_1.0.zip\")\n }\n if (type == \"source\") {\n local.pkgs <- c(local.pkgs, \"openmetar_1.0.tar.gz\")\n }\n } \n if (length(repo.pkgs) > 0) {\n # install packages from repository, if necessary\n install.packages(repo.pkgs)\n }\n setwd(start_dir)\n if (length(local.pkgs > 0)) {\n install.packages(local.pkgs, repo=NULL)\n }\n} else {\n cat(\"Your version of R is not compatible with openmetar.\\n\")\n cat(\"Compatible versions are 2.9.2, 2.10.0, 2.10.1, 2.11.0, and 2.11.1.\\n\")\n}" }, { "alpha_fraction": 0.458781361579895, "alphanum_fraction": 0.48745518922805786, "avg_line_length": 19.461538314819336, "blob_id": "1ffd8ea5e60aeb79da59a777e4a259b662f7676f", "content_id": "a0c84d1786f0162c91cfec2713f916026f5e97d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 279, "license_type": "no_license", "max_line_length": 64, "num_lines": 13, "path": "/src/R/HSROC/R/REFSTD_6_SE.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "REFSTD_6_SE <-\r\nfunction (refstd, N.refstd, A.Se2, B.Se2) \r\n{\r\n if (refstd == TRUE) {\r\n se = 1\r\n }\r\n else {\r\n se = rbeta(n = N.refstd, shape1 = A.Se2, shape2 = B.Se2)\r\n }\r\n results = list(se)\r\n names(results) = list(\"SE\")\r\n return(results)\r\n}\r\n" }, { "alpha_fraction": 0.521504819393158, "alphanum_fraction": 0.5339603424072266, "avg_line_length": 41.3943977355957, "blob_id": "7ddcb1b0f533f7528d7b28570fde2f33e935f523", "content_id": "ca7617344ba94d194ad337db01d9d603971ff133", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 19670, "license_type": "no_license", "max_line_length": 584, "num_lines": 464, "path": "/src/results_window.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "#############################################\n# #\n# Byron C. Wallace George E. Dietz #\n# Brown University CEBM@Brown #\n# OpenMeta[analyst] #\n# #\n# #\n# This is the component responsible #\n# for rendering MA results. #\n# #\n#############################################\n\nimport random\nfrom PyQt4.Qt import *\nimport os\nimport sys\nimport ui_results_window\nimport edit_forest_plot_form\nimport meta_py_r\n#import shutil\n\nPageSize = (612, 792)\npadding = 25\nhorizontal_padding = 75\nSCALE_P = .5 # percent images are to be scaled\n\n# these are special forest plots, in that multiple parameters objects are\n# require to re-generate them (and we invoke a different method!)\nSIDE_BY_SIDE_FOREST_PLOTS = (\"NLR and PLR Forest Plot\", \"Sensitivity and Specificity\", \"Cumulative Forest Plot\")\nROW_HEIGHT = 15 # by trial-and-error; seems to work very well\n\nclass ResultsWindow(QMainWindow, ui_results_window.Ui_ResultsWindow):\n\n def __init__(self, results, parent=None):\n\n super(ResultsWindow, self).__init__(parent)\n self.setupUi(self)\n self.copied_item = QByteArray()\n self.paste_offset = 5\n self.add_offset = 5\n self.buffer_size = 2\n self.prev_point = QPoint()\n self.borders = []\n self.printer = QPrinter(QPrinter.HighResolution)\n self.printer.setPageSize(QPrinter.Letter)\n\n QObject.connect(self.nav_tree, SIGNAL(\"itemClicked(QTreeWidgetItem*, int)\"),\n self.item_clicked)\n \n self.psuedo_console.blockSignals(False) \n QObject.connect(self.psuedo_console, SIGNAL(\"returnPressed(void)\"),\n self.process_console_input)\n QObject.connect(self.psuedo_console, SIGNAL(\"upArrowPressed()\"),\n self.f)\n QObject.connect(self.psuedo_console, SIGNAL(\"downArrowPressed()\"),\n self.f)\n \n \n self.nav_tree.setHeaderLabels([\"results\"])\n self.nav_tree.setItemsExpandable(True)\n self.x_coord = 5\n self.y_coord = 5\n\n # set (default) splitter sizes\n self.splitter.setSizes([400, 100])\n self.results_nav_splitter.setSizes([200,500])\n\n self.scene = QGraphicsScene(self)\n\n self.images = results[\"images\"]\n print \"images returned from analytic routine: %s\" % self.images\n self.image_order = None\n if \"image_order\" in results:\n self.image_order = results[\"image_order\"]\n print \"image display order: %s\" % self.image_order\n\n self.params_paths = {}\n if \"image_params_paths\" in results:\n self.params_paths = results[\"image_params_paths\"]\n \n self.image_var_names = results[\"image_var_names\"]\n self.set_psuedo_console_text()\n self.items_to_coords = {}\n self.texts = results[\"texts\"]\n\n\n # first add the text to self.scene\n self.add_text()\n\n self.y_coord += ROW_HEIGHT/2.0\n\n # additional padding for Windows..\n # again, heuristic. I don't know\n # why windows requires so much padding.\n if sys.platform.startswith('win'):\n self.y_coord += 2*ROW_HEIGHT\n\n # and now the images\n self.add_images()\n\n # reset the scene\n self.graphics_view.setScene(self.scene)\n self.graphics_view.ensureVisible(QRectF(0,0,0,0))\n\n\n\n def f(self):\n print self.current_line()\n\n def set_psuedo_console_text(self):\n text = [\"\\t\\tOpenMeta(analyst)\",\n \"This is a pipe to the R console. 
The image names are as follows:\"]\n if self.image_var_names is not None:\n for image_var_name in self.image_var_names.values():\n text.append(image_var_name)\n self.psuedo_console.setPlainText(QString(\"\\n\".join(text)))\n self.psuedo_console.append(\">> \")\n\n\n def add_images(self):\n # temporary fix!\n image_order = self.images.keys()\n \n if self.image_order is not None:\n image_order = self.image_order\n \n ungrouped_images = [(title, self.images[title]) for title in image_order]\n ordered_images = ungrouped_images\n \n if self.image_order is None:\n # add to the arguments to make more groups, also make sure to add them\n # in add_text\n grouped_images = self._group_items(ungrouped_images,\n [\"Likelihood\",\"nlr\",\"plr\"],\n [\"sens\",\"spec\"])\n ordered_images = grouped_images\n \n \n for title,image in ordered_images:\n print \"title: %s; image: %s\" % (title, image)\n cur_y = max(0, self.y_coord)\n print \"cur_y: %s\" % cur_y\n # first add the title\n qt_item = self.add_title(title)\n\n pixmap = self.generate_pixmap(image)\n \n # if there is a parameters object associated with this object\n # (i.e., it is a forest plot of some variety), we pass it along\n # to the create_pixmap_item method to for the context_menu \n # construction\n params_path = None\n if self.params_paths is not None and title in self.params_paths:\n params_path = self.params_paths[title]\n\n img_shape, pos, pixmap_item = self.create_pixmap_item(pixmap, self.position(),\\\n title, image, params_path=params_path)\n \n self.items_to_coords[qt_item] = pos\n \n\n\n def generate_pixmap(self, image):\n # now the image\n pixmap = QPixmap(image)\n \n ###\n # we scale to address issue #23.\n # should probably pick a 'target' width/height, in case\n # others generate smaller images by default.\n scaled_width = SCALE_P*pixmap.width()\n scaled_height = SCALE_P*pixmap.height()\n \n\n if scaled_width > self.scene.width():\n self.scene.setSceneRect(0, 0, \\\n scaled_width+horizontal_padding,\\\n self.scene.height())\n \n\n pixmap = pixmap.scaled(scaled_width, scaled_height, \\\n transformMode=Qt.SmoothTransformation)\n\n return pixmap\n\n\n def add_text(self):\n \n # add to the arguments to make more groups, also make sure to add them\n # in add_images\n grouped_items = self._group_items(self.texts.items(),\n [\"Likelihood\",\"nlr\",\"plr\"],\n [\"sens\",\"spec\"])\n \n for title, text in grouped_items:\n try:\n print \"title: %s; text: %s\" % (title, text)\n cur_y = max(0, self.y_coord)\n print \"cur_y: %s\" % cur_y\n # first add the title\n qt_item = self.add_title(title)\n\n # now the text\n text_item_rect, pos = self.create_text_item(unicode(text), self.position())\n self.items_to_coords[qt_item] = pos\n except:\n pass\n \n def _group_items(self, items, *groups):\n '''Groups items together if their title contains an element in a group list.\n items is a tuple of key,value pairs i.e. 
(title,text)\n Each group is a list of strings to which item titles should be matched\n i.e: _group_items(items, ['NLR','PLR'], ['sens','spec']) '''\n \n def _get_group_id(key):\n for group_id, group in enumerate(groups):\n for grp_member in group:\n if key.lower().find(grp_member.lower()) != -1:\n return group_id\n return None\n \n # initialization\n grouped_items = []\n for i in range(len(groups)+1):\n grouped_items.append([])\n no_grp_index = len(groups)\n \n # main loop\n for key, value in items:\n group_id = _get_group_id(key)\n if group_id is None:\n grouped_items[no_grp_index].append((key,value))\n else:\n grouped_items[group_id].append((key,value))\n \n # return result\n result = []\n for x in grouped_items:\n result.extend(x)\n return result\n\n \n\n \n \n\n def add_title(self, title):\n print(\"Adding title\")\n text = QGraphicsTextItem()\n # I guess we should use a style sheet here,\n # but it seems like it'd be overkill.\n html_str = '<p style=\"font-size: 14pt; color: black; face:verdana\">%s</p>' % title\n text.setHtml(html_str)\n #text.setPos(self.position())\n print \" title at: %s\" % self.y_coord\n self.scene.addItem(text)\n qt_item = QTreeWidgetItem(self.nav_tree, [title])\n self.scene.setSceneRect(0, 0, self.scene.width(), self.y_coord + text.boundingRect().height() + padding)\n print(\" Setting position at (%d,%d)\" % (self.x_coord, self.y_coord)) \n text.setPos(self.position()) #####\n self.y_coord += text.boundingRect().height()\n return qt_item\n\n def item_clicked(self, item, column):\n print self.items_to_coords[item]\n self.graphics_view.centerOn(self.items_to_coords[item])\n\n def create_text_item(self, text, position):\n txt_item = QGraphicsTextItem(QString(text))\n txt_item.setFont(QFont(\"courier\", 12))\n txt_item.setToolTip(\"To copy the text:\\n1) Right click on the text and choose \\\"Select All\\\".\\n2) Right click again and choose \\\"Copy\\\".\")\n txt_item.setTextInteractionFlags(Qt.TextSelectableByMouse)\n self.scene.addItem(txt_item)\n # fix for issue #149; was formerly txt_item.boundingRect().size().height()\n \n #self.y_coord += txt_item.boundingRect.height() #ROW_HEIGHT*text.count(\"\\n\")\n self.scene.setSceneRect(0, 0, max(self.scene.width(),\n txt_item.boundingRect().size().width()),\n self.y_coord+txt_item.boundingRect().height()+padding)\n \n self.y_coord += txt_item.boundingRect().height() ###\n txt_item.setPos(position)\n \n return (txt_item.boundingRect(), position)\n\n def process_console_input(self):\n res = str(meta_py_r.execute_r_string(self.current_line()))\n\n # echo the result\n self.psuedo_console.append(QString(res))\n self.psuedo_console.append(\">> \")\n\n def current_line(self):\n last_line = self.psuedo_console.toPlainText().split(\"\\n\")[-1]\n return str(last_line.replace(\">>\", \"\")).strip()\n\n def _get_plot_type(self, title):\n # at present we use the *title* as the type --\n # this is currently _not_ set by the user, so it's\n # 'safe', but it's not exactly elegant. 
probably\n # we should return a type directly from R.\n # on other hand, this couples R + Python even\n # more...\n plot_type = None\n tmp_title = title.lower()\n if \"forest\" in tmp_title:\n plot_type = \"forest\"\n elif \"regression\" in tmp_title:\n plot_type = \"regression\"\n return plot_type\n\n def create_pixmap_item(self, pixmap, position, title, image_path,\\\n params_path=None, matrix=QMatrix()):\n item = QGraphicsPixmapItem(pixmap)\n item.setToolTip(\"To save the image:\\nright-click on the image and choose \\\"save image as\\\".\\nSave as png will correctly render non-latin fonts but does not respect changes to plot made through 'edit_plot ...'\")\n \n \n self.y_coord += item.boundingRect().size().height()\n# item.setFlags(QGraphicsItem.ItemIsSelectable|\n# QGraphicsItem.ItemIsMovable)\n item.setFlags(QGraphicsItem.ItemIsSelectable)\n\n\n self.scene.setSceneRect(0, 0, \\\n max(self.scene.width(), \\\n item.boundingRect().size().width()),\\\n self.y_coord + item.boundingRect().size().height() + padding)\n\n print \"creating item @:%s\" % position\n \n #item.setMatrix(matrix)\n self.scene.clearSelection()\n self.scene.addItem(item)\n item.setPos(position)\n \n # for now we're inferring the plot type (e.g., 'forest'\n # from the title of the plot (see in-line comments, above)\n plot_type = self._get_plot_type(title)\n\n # attach event handler for mouse-clicks, i.e., to handle\n # user right-clicks\n item.contextMenuEvent = self._make_context_menu(\n params_path, title, image_path, item, plot_type=plot_type)\n\n return (item.boundingRect().size(), position, item)\n\n\n\n\n def _make_context_menu(self, params_path, title, png_path, \n qpixmap_item, plot_type=\"forest\"):\n plot_img = QImage(png_path)\n \n \n def _graphics_item_context_menu(event):\n def add_save_as_pdf_menu_action(menu):\n action = QAction(\"save pdf image as...\", self)\n QObject.connect(action, SIGNAL(\"triggered()\"),\n lambda : self.save_image_as(params_path, title, \n plot_type=plot_type, format=\"pdf\"))\n menu.addAction(action)\n def add_save_as_png_menu_action(menu):\n action = QAction(\"save png image as...\", self)\n QObject.connect(action, SIGNAL(\"triggered()\"),\n lambda : self.save_image_as(params_path, title, \n plot_type=plot_type,\n unscaled_image = plot_img, format=\"png\"))\n menu.addAction(action)\n def add_edit_plot_menu_action(menu):\n # only know how to edit *simple* (i.e., _not_ side-by-side, as \n # in sens and spec plotted on the same canvass) forest plots for now\n if plot_type == \"forest\" and not self._is_side_by_side_fp(title):\n action = QAction(\"edit plot...\", self)\n QObject.connect(action, SIGNAL(\"triggered()\"),\n lambda : self.edit_image(params_path, title,\n png_path, qpixmap_item))\n menu.addAction(action)\n \n context_menu = QMenu(self)\n if params_path:\n add_save_as_pdf_menu_action(context_menu)\n add_save_as_png_menu_action(context_menu) \n add_edit_plot_menu_action(context_menu)\n else: # no params path given, just give them the png\n add_save_as_png_menu_action(context_menu)\n\n pos = event.screenPos()\n context_menu.popup(pos)\n event.accept()\n\n return _graphics_item_context_menu\n\n\n def _is_side_by_side_fp(self, title):\n return any([side_by_side in title for side_by_side in SIDE_BY_SIDE_FOREST_PLOTS])\n\n def save_image_as(self, params_path, title, plot_type=\"forest\", unscaled_image=None, format=None):\n \n if format not in [\"pdf\",\"png\"]:\n raise Exception(\"Invalid format, needs to be either pdf or png!\")\n \n if not unscaled_image:\n # note that the 
params object will, by convention,\n # have the (generic) name 'plot.data' -- after this\n # call, this object will be in the namespace\n meta_py_r.load_in_R(\"%s.plotdata\" % params_path)\n \n default_path = {\"forest\":\"forest_plot.pdf\",\n \"regression\":\"regression.pdf\"}[plot_type]\n \n # where to save the graphic?\n file_path = unicode(QFileDialog.getSaveFileName(self, \n \"OpenMeta[Analyst] -- save plot as\", \n QString(default_path)))\n \n # now we re-generate it, unless they canceled, of course\n if file_path != \"\":\n if plot_type == \"forest\":\n if self._is_side_by_side_fp(title):\n meta_py_r.generate_forest_plot(file_path, side_by_side=True)\n else:\n meta_py_r.generate_forest_plot(file_path)\n elif plot_type == \"regression\":\n meta_py_r.generate_reg_plot(file_path)\n else:\n print \"sorry -- I don't know how to draw %s plots!\" % plot_type\n else: # case where we just have the png and can't regenerate the pdf from plot data\n default_path = '.'.join([title.replace(' ','_'),\"png\"])\n file_path = unicode(QFileDialog.getSaveFileName(self, \"OpenMeta[Analyst] -- save plot as\", QString(default_path)))\n unscaled_image.save(QString(file_path),\"PNG\")\n \n\n def edit_image(self, params_path, title, png_path, pixmap_item):\n plot_editor_window = edit_forest_plot_form.EditPlotWindow(\\\n params_path, png_path,\\\n pixmap_item, parent=self)\n if plot_editor_window is not None:\n plot_editor_window.show()\n else:\n # TODO show a warning\n print \"sorry - can't edit\"\n \n def position(self):\n point = QPoint(self.x_coord, self.y_coord)\n return self.graphics_view.mapToScene(point)\n\n\nif __name__ == \"__main__\":\n \n # make test results based on results from when meta-analysis run from amino sample data\n test_results = {}\n test_results['images'] = {'Forest Plot': './r_tmp/forest.png'}\n test_results['texts'] = {'Weights':'studies weights\\nGonzalez 1993 7.3%\\nPrins 1993 6.2%\\nGiamarellou 1991 2.1%\\nMaller 1993 10.7%\\nSturm 1989 2.0%\\nMarik 1991 12.2%\\nMuijsken 1988 7.5%\\nVigano 1992 1.8%\\nHansen 1988 5.3%\\nDe Vries 1990 6.1%\\nMauracher 1989 2.2%\\nNordstrom 1990 5.3%\\nRozdzinski 1993 10.3%\\nTer Braak 1990 8.7%\\nTulkens 1988 1.2%\\nVan der Auwera 1991 2.0%\\nKlastersky 1977 6.0%\\nVanhaeverbeek 1993 1.2%\\nHollender 1989 1.8%\\n',\n 'Summary':'Binary Random-Effects Model\\n\\nMetric: Odds Ratio\\n\\n Model Results\\n\\n Estimate Lower bound Upper bound p-Value \\n\\n 0.770 0.485 1.222 0.267 \\n\\n\\n Heterogeneity\\n\\n tau^2 Q(df=18) Het. p-Value I^2 \\n\\n 0.378 33.360 0.015 46% \\n\\n\\n Results (log scale)\\n\\n Estimate Lower bound Upper bound Std. 
error \\n\\n -0.262 -0.724 0.200 0.236 \\n\\n\\n'\n }\n test_results['image_var_names'] = {'forest plot': 'forest_plot'}\n test_results['image_params_paths'] = {'Forest Plot': 'r_tmp/1369769105.72079'} # change this number as necessary\n test_results['image_order'] = None\n \n \n app = QApplication(sys.argv)\n resultswindow = ResultsWindow(test_results)\n resultswindow.show()\n sys.exit(app.exec_())" }, { "alpha_fraction": 0.6640454530715942, "alphanum_fraction": 0.6937527060508728, "avg_line_length": 43, "blob_id": "eb06bc7c1b0ffae995858022097bdc20acd9d6aa", "content_id": "b2d928f15e1aec285d1bf74e0757a35348a7e394", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2289, "license_type": "no_license", "max_line_length": 153, "num_lines": 52, "path": "/src/forms/ui_cov_subgroup_dlg.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'cov_subgroup_dlg.ui'\n#\n# Created: Tue Jun 14 12:21:35 2011\n# by: PyQt4 UI code generator 4.7.7\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n _fromUtf8 = lambda s: s\n\nclass Ui_cov_subgroup_dialog(object):\n def setupUi(self, cov_subgroup_dialog):\n cov_subgroup_dialog.setObjectName(_fromUtf8(\"cov_subgroup_dialog\"))\n cov_subgroup_dialog.resize(301, 132)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n cov_subgroup_dialog.setFont(font)\n self.buttonBox = QtGui.QDialogButtonBox(cov_subgroup_dialog)\n self.buttonBox.setGeometry(QtCore.QRect(10, 100, 281, 32))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.buttonBox.setFont(font)\n self.buttonBox.setOrientation(QtCore.Qt.Horizontal)\n self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)\n self.buttonBox.setObjectName(_fromUtf8(\"buttonBox\"))\n self.label = QtGui.QLabel(cov_subgroup_dialog)\n self.label.setGeometry(QtCore.QRect(20, 30, 121, 41))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n font.setPointSize(10)\n self.label.setFont(font)\n self.label.setObjectName(_fromUtf8(\"label\"))\n self.cov_subgroup_cbo_box = QtGui.QComboBox(cov_subgroup_dialog)\n self.cov_subgroup_cbo_box.setGeometry(QtCore.QRect(130, 40, 152, 20))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.cov_subgroup_cbo_box.setFont(font)\n self.cov_subgroup_cbo_box.setObjectName(_fromUtf8(\"cov_subgroup_cbo_box\"))\n\n self.retranslateUi(cov_subgroup_dialog)\n QtCore.QMetaObject.connectSlotsByName(cov_subgroup_dialog)\n\n def retranslateUi(self, cov_subgroup_dialog):\n cov_subgroup_dialog.setWindowTitle(QtGui.QApplication.translate(\"cov_subgroup_dialog\", \"select covariate\", None, QtGui.QApplication.UnicodeUTF8))\n self.label.setText(QtGui.QApplication.translate(\"cov_subgroup_dialog\", \"covariate for \\n\"\n\"subgroups:\", None, QtGui.QApplication.UnicodeUTF8))\n\n" }, { "alpha_fraction": 0.3883495032787323, "alphanum_fraction": 0.4207119643688202, "avg_line_length": 23.75, "blob_id": "5320bdf53d041d186ef6f886a56fc0bfb1514c2a", "content_id": "19d13c604107bd0c1d00beebc0ed65f8e2d1fc47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 309, "license_type": "no_license", "max_line_length": 45, "num_lines": 12, "path": "/src/R/HSROC/R/truncnorm.R", "repo_name": "bwallace/OpenMeta-analyst-", 
"src_encoding": "UTF-8", "text": "truncnorm <-\r\nfunction (t, n, m, sd, limit) \r\n{\r\n l = limit[, 1] * t + limit[, 2] * (1 - t)\r\n u = limit[, 1] * (1 - t) + limit[, 3] * t\r\n l1 <- pnorm((l - m)/sd)\r\n u1 <- pnorm((u - m)/sd)\r\n x <- runif(n, l1, u1)\r\n y = qnorm(x) * sd + m\r\n results = cbind(y, l, u)\r\n return(results)\r\n}\r\n" }, { "alpha_fraction": 0.3308957815170288, "alphanum_fraction": 0.4095064103603363, "avg_line_length": 26.789474487304688, "blob_id": "2d54371299b388fe4fa5db47a97e89ce833663b9", "content_id": "44e2f1a2a83e8b408cbfac8da427dc2d99cdb0be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 547, "license_type": "no_license", "max_line_length": 61, "num_lines": 19, "path": "/src/R/HSROC/R/REFSTD_5.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "REFSTD_5 <-\r\nfunction (rs, yy1, yy2, yy3, yy4, stu.gr, t1, t2, yij) \r\n{\r\n if (rs[[1]] == 1) {\r\n y1 = yy1\r\n y2 = yy2\r\n y3 = yy3\r\n y4 = yy4\r\n }\r\n else {\r\n yy = cbind(stu.gr, t1, t2, yij)\r\n y1 = by(yy[, 2:4], yy[, 1], FUN = XY.function, b = 1)\r\n y2 = by(yy[, 2:4], yy[, 1], FUN = XY.function, b = 2)\r\n y3 = by(yy[, 2:4], yy[, 1], FUN = XY.function, b = 3)\r\n y4 = by(yy[, 2:4], yy[, 1], FUN = XY.function, b = 4)\r\n }\r\n result = list(y1, y2, y3, y4)\r\n return(result)\r\n}\r\n" }, { "alpha_fraction": 0.40617138147354126, "alphanum_fraction": 0.43824052810668945, "avg_line_length": 34.54146194458008, "blob_id": "cbe03e66ea7089759440f1a5fb9cbd89a2fd73cb", "content_id": "be3daf7a071a59c8d18aab1398af22df058e96bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 43718, "license_type": "no_license", "max_line_length": 204, "num_lines": 1230, "path": "/src/R/HSROC/src/HSROC.cpp", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <fstream>\n#include <math.h>\n#include <R.h>\n#include <Rmath.h>\n#include <stdio.h> \n \nusing namespace std;\n\nextern \"C\"{ \n\n double Prob_cpp (int t1, int t2, double pi, double S1, double S2, double C1, double C2)\n\t{\n double res;\n res = pi*pow(S1,t1)*pow(1.0-S1,1-t1)*pow(S2,t2)*pow(1.0-S2,1-t2)/(pi*pow(S1,t1)*pow(1.0-S1,1-t1)*pow(S2,t2)*pow(1.0-S2,1-t2) + (1.0-pi)*pow(C1,1-t1)*pow(1.0-C1,t1)*pow(C2,1-t2)*pow(1.0-C2,t2) ) ;\n return(res);\n\t}\n\n double REFSTD_2 (double likelihood_1, double likelihood_2, double prior)\n\t{\n double res=0.0;\n res = likelihood_1 + likelihood_2 + prior ;\n return(res);\n\t}\n\n double Truncnorm (int t1, double mu, double sd, double theta, double low_r, double up_r )\n\t{\n // t1 = outcome of subject to test under evaluation\n // mu defined as : alpha.rep*(Y.ij/2) - alpha.rep*((1-Y.ij)/2)\n // sd defined as : exp(vec.beta*(Y.ij - 0.5))\n // theta\n // low_r : lower bound of latent variable rij\n // up_r : upper bound of latent variable rij\n\n double low =0.0;\n double up =0.0;\n double low_trunc;\n double up_trunc ;\n double x;\n double res;\n\n // GetRNGstate();\n if(t1 == 1)\n {\n low = theta;\n up = up_r;\n }\n else if (t1 == 0)\n {\n low = low_r;\n up = theta;\n }\n low_trunc = pnorm( (low-mu)/sd, 0.0, 1.0, 1, 0 ) ;\n up_trunc = pnorm( (up-mu)/sd, 0.0, 1.0, 1, 0 ) ;\n x = runif(low_trunc, up_trunc) ;\n res = qnorm(x, 0.0, 1.0, 1, 0)*sd + mu ;\n // PutRNGstate();\n return(res);\n\t}\n\n double Truncnorm2 (double mu, double sd, double low, double up )\n\t{\n // mu defined as : alpha.rep*(Y.ij/2) - alpha.rep*((1-Y.ij)/2)\n // sd defined as : 
exp(vec.beta*(Y.ij - 0.5))\n // low : lower bound\n // up : upper bound\n\n double low_trunc;\n double up_trunc ;\n double x;\n double res;\n\n //GetRNGstate();\n low_trunc = pnorm( (low-mu)/sd, 0.0, 1.0, 1, 0 ) ;\n up_trunc = pnorm( (up-mu)/sd, 0.0, 1.0, 1, 0 ) ;\n x = runif(low_trunc, up_trunc) ;\n res = qnorm(x, 0.0, 1.0, 1, 0)*sd + mu ;\n //PutRNGstate();\n return(res);\n\t}\n\n\n\tdouble mean (double size_x, double *x )\n\t{\n\n // size_x : size of the vector to average\n // x : vector to average)\n\n int i;\n double partial_res = 0.0;\n double res = 0.0;\n double n = size_x;\n for(i=0; i< n; i++)\n {\n partial_res += x[i];\n }\n res = partial_res/n;\n return(res);\n\t}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\t double truncgamma_cpp(double shape, double scale, double low, double up)\n\t {\n // shape = shape parameter\n // scale = scale parameter\n // theta\n // low : lower bound (truncation)\n // up : upper bound (truncation)\n\n\n double low_trunc;\n double up_trunc;\n double x;\n double res;\n double x_c;\n\n //GetRNGstate();\n low_trunc = pgamma( low, shape, scale, 1, 0 ) ;\n up_trunc = pgamma( up, shape, scale, 1, 0 ) ;\n x = runif(low_trunc, up_trunc) ;\n if(x == 0.0)\n {\n x_c = low;\n }\n else if(x == 1.0)\n {\n x_c = up;\n }\n else x_c = qgamma(x, shape, scale, 1, 0);\n res = x_c;\n //PutRNGstate();\n return(res);\n\t}\n\n\n double MH_algo_cpp (int t, double r[], double Y[], double alpha[], double low, double up, double x )\n\n\t{\n\n // t : Total number of subjects in all studies\n // r : latent variabel rij\n // Y : Yij\n // alpha\n // low : lower bound of prior of beta\n // up : upper bound of prior of beta\n // x : previous state value\n\n //\n\n //double acc_rate = 0.0;\n double alpha_G = 0.0;\n double beta_G = 0.0;\n double alpha_IG = 0.0;\n double beta_IG = 0.0;\n double aprob;\n double u;\n double previous = x;\n\n // PROPOSAL DISTRIBUTION IS AN INVERSE GAMMA involving the part where Yij = 1\n for(int i=0; i < t; i++)\n {\n alpha_G += (1.0-Y[i]);\n beta_G += (1.0-Y[i])*( (r[i] + 0.5*alpha[i])*(r[i] + 0.5*alpha[i]) );\n alpha_IG += Y[i];\n beta_IG += Y[i]*( (r[i] - 0.5*alpha[i])*(r[i] - 0.5*alpha[i]) );\n }\n double alpha_G_param = 0.5*alpha_G;\n double beta_G_param = 0.5*beta_G;\n double alpha_IG_param = 0.5*alpha_IG ;\n double beta_IG_param = 0.5*beta_IG;\n\n\n\n //GetRNGstate();\n double can = 1/truncgamma_cpp(alpha_IG_param, 1.0/beta_IG_param, 1.0/up, 1.0/low);\n double numerator = dgamma(can, alpha_G_param, 1.0/(beta_G_param), 0);\n double denominator = dgamma(x, alpha_G_param, 1.0/(beta_G_param), 0);\n double ratio = numerator/(denominator);\n if(1.0 < ratio) aprob = 1.0;\n else aprob = ratio ;\n u = runif(0.0,1.0);\n \tif(u < aprob)\n {\n previous = can;\n // acc_rate = 1;\n }\n //else previous = previous;\n double res = previous;\n return(res);\n //PutRNGstate();\n\t}\n\n double summation (int t, double x[], double y)\n {\n double res = 0.0;\n double partial_res = 0.0;\n for(int i=0; i<t; i++)\n {\n partial_res += (x[i] - y)*(x[i] - y);\n }\n res = 1.0/( 0.5*partial_res );\n return(res);\n }\n\n double summation2 (int t, double x[], double y, double z)\n {\n double res = 0.0;\n double partial_res = 0.0;\n for(int i=0; i<t; i++)\n {\n partial_res += (x[i] - y)*(x[i] - y);\n }\n res = 1.0/( z + 0.5*partial_res );\n return(res);\n }\n\n\n\n\n/*\t//PROGRESSION BAR\n int main (N1 )\n {\n int counter ;\n for (counter = 0; counter < (N1+1); ++counter) {\n\t\t if ((counter / N2) == 2 && (counter % N2) == 0) {\n\t\t\t cout << \"[==== ]20%\\r\";\n\t\t }\n\t\t else if ((counter / N2) 
== 4 && (counter % N2) == 0) {\n\t\t\t cout << \"[======== ]40%\\r\";\n\t\t }\n\t\t else if ((counter / N2) == 6 && (counter % N2) == 0) {\n\t\t\t cout << \"[============ ]60%\\r\";\n\t \t }\n\t\t else if ((counter / N2) == 8 && (counter % N2) == 0) {\n\t\t\t cout << \"[================ ]80%\\r\";\n\t\t }\n\t\t else if ((counter / N2) == 10 && (counter % N2) == 0) {\n\t\t\t cout << \"[====================]100%\" << endl;\n\t\t }\n\t }\n }\n*/\n\n double function1 (int gs, double x )\n {\n double a = 0.0;\n if(gs == 0)\n {\n a = x;\n }\n else if(gs == 1)\n {\n a = 1.0;\n }\n return(a);\n }\n\n double function2 (int gs, double a, double b)\n {\n double res = 0.0;\n if(gs == 0)\n {\n res = rbeta(a, b);\n }\n else if(gs == 1)\n {\n res = 1;\n }\n return(res);\n }\n \n double size_alloc_between_study (int iteration, int max_size)\n {\n int res = 0;\n if(iteration > max_size) { res = max_size; }\n else if(iteration <= max_size) { res = iteration; }\n return(res) ; \n }\n\n\n void HSROC (int *iter, int *gold_std, int *gold_se, int *gold_sp, int *total, int *t1, int *t2, double *vec_pi, double *vec_S1,\n double *vec_S2, double *vec_C1, double *vec_C2, int *study_samplesize, int *n_studies, double *alpha_pi, double *beta_pi,\n int *refstd, int *numb_refstd, double *sens2_alpha, double *sens2_beta, double *spec2_alpha, double *spec2_beta, double *vec_alpha,\n double *vec_theta, double *vec_beta, double *low_rij, double *up_rij, double *vec_CTHETA, double *vec_sigma_theta, double *vec_sigma_alpha,\n double *vec_LAMBDA, double *LAMBDA_lower, double *LAMBDA_upper, double *beta_a, double *beta_b, double *CTHETA_lower,\n double *CTHETA_upper, double *low_sd_alpha, double *up_sd_alpha, double *low_sd_theta, double *up_sd_theta,\n int *prior_sd_alpha, int *prior_sd_theta, int *refresh, int *break_point\n\n )\n \n \n /* (int *iter, int *gold_std, int *gold_se, int *gold_sp, int *total, int *t1, int *t2, double *vec_pi, double *vec_S1,\n double *vec_S2, double *vec_C1, double *vec_C2,\n int *study_samplesize, int *n_studies, double *alpha_pi, double *beta_pi, int *refstd, int *numb_refstd,\n double *sens2_alpha, double *sens2_beta, double *spec2_alpha, double *spec2_beta, double *vec_alpha, double *vec_theta,\n double *vec_beta, double *low_rij, double *up_rij\n , int *refresh ) */\n \n\n\t{\n //int single_print = 200000;\n \n/* if( (*iter)*(*n_studies) > single_print)\n {\n int howmany = floor( (*iter)*(*n_studies) ) ;\n int remains = fmod( (*iter)*(*n_studies), single_point); \n double matrix_PI[howmany];\n }\n else if( (*iter)*(*n_studies) <= single_print )\n {\n double matrix_PI[(*iter)*(*n_studies)]; \n }\n*/\n FILE *PI_file;\n PI_file = fopen(\"PI.txt\", \"wb\");\n double *matrix_PI = new double [(*iter)*(*n_studies)];\n \n \n FILE *S2_file;\n S2_file = fopen(\"Sens2.txt\", \"wb\");\n double *matrix_S2 = new double [(*iter)*(*refstd)];\n \n FILE *C2_file;\n C2_file = fopen(\"Spec2.txt\", \"wb\");\n double *matrix_C2 = new double [(*iter)*(*refstd)]; \n \n FILE *theta_file;\n theta_file = fopen(\"theta.txt\", \"wb\");\n double *matrix_theta = new double [(*iter)*(*n_studies)];\n\n FILE *alpha_file;\n alpha_file = fopen(\"alpha.txt\", \"wb\");\n double *matrix_alpha = new double [(*iter)*(*n_studies)];\n \n FILE *LAMBDA_file;\n LAMBDA_file = fopen(\"LAMBDA.txt\", \"wb\");\n double *matrix_LAMBDA = new double [(*iter)*(*n_studies)];\n \n FILE *beta_file;\n beta_file = fopen(\"beta.txt\", \"wb\");\n double *matrix_beta = new double [*iter];\n \n FILE *CTHETA_file;\n CTHETA_file = 
fopen(\"capital_THETA.txt\", \"wb\");\n double *matrix_CTHETA = new double [*iter];\n \n FILE *sd_alpha_file;\n sd_alpha_file = fopen(\"sigma.alpha.txt\", \"wb\");\n double *matrix_sd_alpha = new double [*iter];\n \n FILE *sd_theta_file;\n sd_theta_file = fopen(\"sigma.theta.txt\", \"wb\");\n double *matrix_sd_theta = new double [*iter];\n \n FILE *S1_file;\n S1_file = fopen(\"Sens1.txt\", \"wb\");\n double *matrix_S1 = new double [(*iter)*(*n_studies)];\n\n FILE *C1_file;\n C1_file = fopen(\"Spec1.txt\", \"wb\");\n double *matrix_C1 = new double [(*iter)*(*n_studies)];\n \n FILE *pool_S_file;\n pool_S_file = fopen(\"S_overall.txt\", \"wb\");\n double *matrix_pool_S = new double [*iter];\n \n FILE *pool_C_file;\n pool_C_file = fopen(\"C_overall.txt\", \"wb\");\n double *matrix_pool_C = new double [*iter];\n \n FILE *SNEW_file;\n SNEW_file = fopen(\"Sens1_new.txt\", \"wb\");\n double *matrix_SNEW = new double [*iter];\n\n FILE *CNEW_file;\n CNEW_file = fopen(\"Spec1_new.txt\", \"wb\");\n double *matrix_CNEW = new double [*iter];\n \n \n// int t1_0[*total];\n// int t2_0[*total];\n// for(int i=0; i<*total; i++)\n// {\n// t1_0[i] = 1 - t1[i];\n// t2_0[i] = 1 - t2[i]; \n// }\n\n int condition = 1; // Variable to break the outer loop if undefined real results occurs inside an inner loop, i.e. inisde the inner loop of prevalence, or alpha, or theta, ...\n int loop_count = 1;\n Rprintf(\"Starting the Gibbs sampler for %d iterations ... \\n\", *iter);\n for(int big_loop=0; big_loop<*iter; big_loop++)\n {\n if(big_loop == (*refresh)*loop_count) {\n\t Rprintf(\" %d iterations completed out of %d ... \\n\", (*refresh)*loop_count, *iter );\n\t loop_count = loop_count + 1;\n }\n \n \n //*****************************************************\n // Creation of \"Restore\" files. 
The following files will include the last posterior estimates before the program crashes (if it ever crashes)\n //*****************************************************\n/* ofstream Restore_file;\n Restore_file.open(\"Restore.txt\");\n for(int ii1=0; ii1<*n_studies; ii1++)\n {\n Restore_file << vec_alpha[ii1] << \" \";\n }\n Restore_file << endl;\n for(int ii2=0; ii2<*n_studies; ii2++)\n {\n Restore_file << vec_theta[ii2] << \" \";\n }\n Restore_file << endl;\n for(int ii3=0; ii3<*n_studies; ii3++)\n {\n Restore_file << vec_S1[ii3] << \" \";\n }\n Restore_file << endl;\n for(int ii4=0; ii4<*n_studies; ii4++)\n {\n Restore_file << vec_C1[ii4] << \" \";\n }\n Restore_file << endl;\n for(int ii5=0; ii5<*n_studies; ii5++)\n {\n Restore_file << vec_pi[ii5] << \" \";\n }\n Restore_file << endl;\n\n Restore_file.close();\n\n ofstream Restore2_file;\n Restore2_file.open(\"Restore2.txt\");\n Restore2_file << *vec_LAMBDA << \" \" << *vec_sigma_alpha << \" \" << *vec_CTHETA << \" \" << *vec_sigma_theta << \" \" << *vec_beta ;\n Restore2_file << endl;\n Restore2_file.close();\n \n if(*gold_std == 0)\n {\n ofstream Restore3_file;\n Restore3_file.open(\"Restore3.txt\");\n for(int ii6=0; ii6<*refstd; ii6++)\n {\n Restore3_file << vec_S2[ii6] << \" \";\n }\n Restore3_file << endl; \n for(int ii7=0; ii7<*refstd; ii7++)\n {\n Restore3_file << vec_C2[ii7] << \" \";\n }\n Restore3_file << endl;\n Restore3_file.close();\n } \n*/ //*****************************************************\n\n //*****************************************************\n // REPEAT THE PI, S1 and C1 VECTORS\n //*****************************************************\n int count = 0;\n double pi_rep[*total];\n double S1_rep[*total];\n double C1_rep[*total];\n for(int i=0; i<*n_studies ; i++)\n {\n int N = study_samplesize[i];\n for(int j=0; j<N ; j++)\n {\n pi_rep[count] = vec_pi[i];\n S1_rep[count] = vec_S1[i];\n C1_rep[count] = vec_C1[i];\n\n count++;\n }\n }\n int count2 = 0;\n double S2_rep[*total];\n double C2_rep[*total];\n for(int i2=0; i2<*refstd ; i2++)\n {\n int N2 = numb_refstd[i2];\n for(int j2=0; j2<N2 ; j2++)\n {\n S2_rep[count2] = function1(*gold_std, vec_S2[i2]);\n C2_rep[count2] = function1(*gold_std, vec_C2[i2]);\n count2++;\n }\n } \n //*****************************************************\n\n\n //*****************************************************\n // Calculation of prob.Yj\n //*****************************************************\n double res_probYij[*total];\n for(int i3=0; i3<*total; i3++)\n\t\t{\n res_probYij[i3] = Prob_cpp(t1[i3], t2[i3], pi_rep[i3], S1_rep[i3], S2_rep[i3], C1_rep[i3], C2_rep[i3]);\n }\n //*****************************************************\n\n\n //*****************************************************\n // Calculation of Y.ij\n //*****************************************************\n double res_Yij[*total];\n GetRNGstate();\n for(int i4=0; i4<*total; i4++)\n {\n res_Yij[i4] = rbinom(1,res_probYij[i4]);\n }\n PutRNGstate();\n //*****************************************************\n\n\n //*****************************************************\n // Calculation of Y1, Y2, Y3 and Y4 AND\n //*****************************************************\n // CONDITIONAL DISTRIBUTION OF PI\n int count5=0;\n double resY1[*n_studies];\n double resY2[*n_studies];\n double resY3[*n_studies];\n double resY4[*n_studies];\n double resPI_a[*n_studies];\n double resPI_b[*n_studies];\n \n //resY1[i5] = resY2[i5] = resY3[i5] = resY4[i5] = resPI_a[i5] = 0.0;\n for(int i5=0; i5<*n_studies; i5++)\n {\n int N5 = 
study_samplesize[i5] ;\n double sum_y = 0.0;\n for(int j5=0; j5<N5; j5++)\n {\n resY1[i5] += t1[j5+count5]*t2[j5+count5]*res_Yij[j5+count5];\n resY2[i5] += (1-t1[j5+count5])*(1-t2[j5+count5])*res_Yij[j5+count5] ;\n resY3[i5] += t1[j5+count5]*(1-t2[j5+count5])*res_Yij[j5+count5] ;\n resY4[i5] += (1-t1[j5+count5])*t2[j5+count5]*res_Yij[j5+count5] ;\n //resPI_a[i5] += res_Yij[j5+count5];\n sum_y += res_Yij[j5+count5];\n }\n resPI_a[i5] = sum_y + *alpha_pi ;\n resPI_b[i5] = N5 - sum_y + *beta_pi ;\n count5 = count5 + N5 ;\n }\n //double pi_iter[*n_studies];\n GetRNGstate();\n for(int i6=0; i6<*n_studies; i6++)\n {\n vec_pi[i6] = rbeta( resPI_a[i6], resPI_b[i6]); \n matrix_PI[(big_loop*(*n_studies)) + i6] = vec_pi[i6];\n //PI_file << vec_pi[i6] << \" \";\n if(!finite(vec_pi[i6]))\n {\n Rprintf(\"Undefined real result. \\n\");\n Rprintf(\"Please set 'first.run' argument to 'FALSE' and call HSROC() again.\\n\");\n condition = 0;\n *break_point = 5;\n break;\n }\n } \n if(condition == 0)\n {\n break; \n } \n //PI_file << endl;\n PutRNGstate();\n //*****************************************************\n\n\n\n\n\n // *****************************************************\n // CONDITIONAL DISTRIBUTION OF S2 & C2. WE WILL ASSUME A NON GOLD STARDARD WITH MULTIPLE REFERENCE STADARDS\n // *****************************************************\n double res_Xij[*total]; \n for(int i2a=0; i2a<*total; i2a++)\n {\n res_Xij[i2a] = (1.0 - res_Yij[i2a]);\n //Xij_file << res_Xij[i2a] << \" \";\n }\n //Xij_file << endl;\n\n int count3=0;\n double resX1[*refstd];\n double resX2[*refstd];\n double resX3[*refstd];\n double resX4[*refstd];\n double resYY1[*refstd];\n double resYY2[*refstd];\n double resYY3[*refstd];\n double resYY4[*refstd];\n for(int i6=0; i6<*refstd; i6++)\n {\n int rssize2 = numb_refstd[i6];\n for(int j3=0; j3<rssize2; j3++)\n {\n resX1[i6] += t1[j3+count3]*t2[j3+count3]*res_Xij[j3+count3];\n resX2[i6] += (1-t1[j3+count3])*(1-t2[j3+count3])*res_Xij[j3+count3];\n resX3[i6] += t1[j3+count3]*(1-t2[j3+count3])*res_Xij[j3+count3];\n resX4[i6] += (1-t1[j3+count3])*t2[j3+count3]*res_Xij[j3+count3];\n resYY1[i6] += t1[j3+count3]*t2[j3+count3]*res_Yij[j3+count3];\n resYY2[i6] += (1-t1[j3+count3])*(1-t2[j3+count3])*res_Yij[j3+count3];\n resYY3[i6] += t1[j3+count3]*(1-t2[j3+count3])*res_Yij[j3+count3];\n resYY4[i6] += (1-t1[j3+count3])*t2[j3+count3]*res_Yij[j3+count3];\n }\n //X1_file << resX1[i6] << \" \";\n count3 = count3 + rssize2 ;\n }\n //X1_file << endl;\n\n\n/* if(*gold_se == 1)\n {\n double a_sp2[*refstd];\n double b_sp2[*refstd];\n for(int i9=0; i9<*refstd; i9++)\n {\n a_sp2[i9] = resX2[i9] + resX3[i9] + spec2_alpha[i9];\n b_sp2[i9] = resX1[i9] + resX4[i9] + spec2_beta[i9];\n }\n S2_file << endl;\n GetRNGstate();\n for(int i9a=0; i9a<*refstd; i9a++)\n {\n vec_C2[i9a] = rbeta(a_sp2[i9a], b_sp2[i9a]);\n C2_file << vec_C2[i9a] << \" \";\n } \n C2_file << endl;\n PutRNGstate();\n *vec_S1 = 1;\n for(int reinit_3=0; reinit_3<*refstd; reinit_3++)\n {\n a_sp2[reinit_3] = 0;\n b_sp2[reinit_3] = 0;\n }\n }\n else if(*gold_sp == 1)\n {\n double a_se2[*refstd];\n double b_se2[*refstd];\n for(int i9=0; i9<*refstd; i9++)\n {\n a_se2[i9] = resYY1[i9] + resYY4[i9] + sens2_alpha[i9];\n b_se2[i9] = resYY2[i9] + resYY3[i9] + sens2_beta[i9];\n }\n GetRNGstate();\n for(int i9a=0; i9a<*refstd; i9a++)\n {\n vec_S2[i9a] = rbeta(a_se2[i9a], b_se2[i9a]);\n S2_file << vec_S2[i9a] << \" \";\n }\n *vec_C1 = 1;\n PutRNGstate();\n for(int reinit_3=0; reinit_3<*refstd; reinit_3++)\n {\n a_se2[reinit_3] = 0;\n b_se2[reinit_3] = 0;\n 
}\n }\n else\n {\n*/ \n\n double a_se2[*refstd];\n double b_se2[*refstd];\n double a_sp2[*refstd];\n double b_sp2[*refstd];\n for(int i9=0; i9<*refstd; i9++)\n {\n a_se2[i9] = resYY1[i9] + resYY4[i9] + sens2_alpha[i9];\n b_se2[i9] = resYY2[i9] + resYY3[i9] + sens2_beta[i9];\n a_sp2[i9] = resX2[i9] + resX3[i9] + spec2_alpha[i9];\n b_sp2[i9] = resX1[i9] + resX4[i9] + spec2_beta[i9];\n }\n GetRNGstate();\n for(int i9a=0; i9a<*refstd; i9a++)\n {\n vec_S2[i9a] = function2(*gold_std, a_se2[i9a], b_se2[i9a] );\n matrix_S2[(big_loop*(*refstd)) + i9a] = vec_S2[i9a];\n //S2_file << vec_S2[i9a] << \" \";\n if(!finite(vec_S2[i9a]))\n {\n Rprintf(\"Undefined real result. \\n\");\n Rprintf(\"Please set 'first.run' argument to 'FALSE' and call HSROC() again.\\n\");\n condition = 0;\n *break_point = 11;\n break;\n \n }\n }\n if(condition == 0)\n {\n break;\n }\n //S2_file << endl;\n PutRNGstate();\n GetRNGstate();\n for(int i9b=0; i9b<*refstd; i9b++)\n {\n vec_C2[i9b] = function2(*gold_std, a_sp2[i9b], b_sp2[i9b] );\n matrix_C2[(big_loop*(*refstd)) + i9b] = vec_C2[i9b];\n //C2_file << vec_C2[i9b] << \" \";\n if(!finite(vec_C2[i9b]))\n {\n Rprintf(\"Undefined real result. \\n\");\n Rprintf(\"Please set 'first.run' argument to 'FALSE' and call HSROC() again.\\n\");\n condition = 0;\n *break_point = 12;\n break;\n }\n }\n if(condition == 0) {break;} //break the outer loop because of undefined real results\n //C2_file << endl;\n PutRNGstate();\n\n for(int reinit_3=0; reinit_3<*refstd; reinit_3++)\n {\n resX1[reinit_3] = 0.0;\n resX2[reinit_3] = 0.0;\n resX3[reinit_3] = 0.0;\n resX4[reinit_3] = 0.0;\n resYY1[reinit_3] = 0.0;\n resYY2[reinit_3] = 0.0;\n resYY3[reinit_3] = 0.0;\n resYY4[reinit_3] = 0.0;\n }\n \n\n // *****************************************************\n\n // *****************************************************\n // CONDITIONAL DISTRIBUTION OF rij\n // *****************************************************\n // REPEAT THE alpha AND theta VECTORS\n int counter_iv = 0;\n double alpha_rep[*total];\n double theta_rep[*total];\n double mu[*total];\n double sd[*total];\n double rij[*total];\n GetRNGstate();\n for(int iv=0; iv<*n_studies ; iv++)\n {\n int times_iv = study_samplesize[iv];\n for(int jv=0; jv<times_iv ; jv++)\n {\n alpha_rep[counter_iv] = vec_alpha[iv];\n theta_rep[counter_iv] = vec_theta[iv];\n mu[counter_iv] = alpha_rep[counter_iv]*(res_Yij[counter_iv]/2.0) - alpha_rep[counter_iv]*((1.0-res_Yij[counter_iv])/2.0);\n sd[counter_iv] = exp(*vec_beta*(res_Yij[counter_iv] - 0.5));\n rij[counter_iv] = Truncnorm(t1[counter_iv], mu[counter_iv], sd[counter_iv], theta_rep[counter_iv], *low_rij, *up_rij);\n counter_iv++;\n }\n }\n PutRNGstate();\n // *****************************************************\n\n//\n // *****************************************************\n // CONDITIONAL DISTRIBUTION OF theta\n // *****************************************************\n int count10 = 0;\n double lower_t[*n_studies];\n double upper_t[*n_studies];\n for(int i10 = 0; i10<*n_studies; i10++)\n {\n int size_10 = study_samplesize[i10] ;\n double max = *low_rij;\n double min = *up_rij;\n for(int j10 = 0; j10<size_10; j10++)\n {\n if(t1[j10+count10] == 0)\n {\n if (max < rij[j10+count10]) max = rij[j10+count10];\n }\n else if(t1[j10+count10] == 1)\n {\n if (min > rij[j10+count10]) min = rij[j10+count10];\n else min = min;\n }\n }\n lower_t[i10] = max ;\n upper_t[i10] = min ;\n count10 = count10 + size_10 ;\n }\n GetRNGstate();\n for(int i11=0; i11<*n_studies; i11++)\n {\n vec_theta[i11] = 
Truncnorm2(*vec_CTHETA, *vec_sigma_theta, lower_t[i11], upper_t[i11] ) ;\n matrix_theta[(big_loop*(*n_studies)) + i11] = vec_theta[i11];\n //theta_file << vec_theta[i11] << \" \";\n if(!finite(vec_theta[i11]))\n {\n Rprintf(\"Undefined real result. \\n\");\n Rprintf(\"Please set 'first.run' argument to 'FALSE' and call HSROC() again.\\n\");\n condition = 0;\n *break_point = 2;\n break;\n }\n }\n if(condition == 0) {break;} //break the outer loop because of undefined real results\n //theta_file << endl;\n PutRNGstate();\n // *****************************************************\n\n\n // *****************************************************\n // CONDITIONAL DISTRIBUTION OF alpha\n // *****************************************************\n int count12 = 0;\n double A[*n_studies];\n double B[*n_studies];\n for (int i12 = 0; i12 < *n_studies; i12++)\n {\n double partial_A = 0.0;\n double partial_B = 0.0;\n int size_12 = study_samplesize[i12] ;\n for(int j12=0 ; j12<size_12; j12++)\n {\n partial_A += (res_Yij[j12+count12]*res_Yij[j12+count12] + (1.0-res_Yij[j12+count12])*(1.0-res_Yij[j12+count12]))/exp(*vec_beta*2.0*(res_Yij[j12+count12] - 0.5));\n partial_B += (rij[j12+count12]*(2.0*res_Yij[j12+count12] - 1.0) )/exp(*vec_beta*2.0*(res_Yij[j12+count12] - 0.5));\n }\n count12 = count12 + size_12 ;\n A[i12] = 0.25*partial_A + 1.0/( (*vec_sigma_alpha)*(*vec_sigma_alpha) );\n B[i12] = partial_B + (*vec_LAMBDA*2.0)/( (*vec_sigma_alpha)*(*vec_sigma_alpha) );\n //vec_alpha[i12] = rnorm(B[i12]/(2.0*A[i12]), 1.0/sqrt(A[i12]));\n //alpha_file << vec_alpha[i12] << \" \";\n }\n GetRNGstate();\n for(int i14 = 0; i14 < *n_studies; i14++)\n {\n vec_alpha[i14] = rnorm(B[i14]/(2.0*A[i14]), 1.0/sqrt(A[i14]));\n matrix_alpha[(big_loop*(*n_studies)) + i14] = vec_alpha[i14];\n //alpha_file << vec_alpha[i14] << \" \";\n if(!finite(vec_alpha[i14]))\n {\n Rprintf(\"Undefined real result. \\n\");\n Rprintf(\"Please set 'first.run' argument to 'FALSE' and call HSROC() again.\\n\");\n condition = 0;\n *break_point = 1;\n break;\n }\n }\n if(condition == 0) {break;} //break the outer loop because of undefined real results\n //alpha_file << endl;\n PutRNGstate();\n // *****************************************************\n\n // *****************************************************\n // CONDITIONAL DISTRIBUTION OF LAMBDA\n // *****************************************************\n GetRNGstate();\n *vec_LAMBDA = Truncnorm2(mean(*n_studies,vec_alpha), *vec_sigma_alpha/sqrt(*n_studies), *LAMBDA_lower, *LAMBDA_upper ) ;\n matrix_LAMBDA[ big_loop ] = *vec_LAMBDA;\n //LAMBDA_file << *vec_LAMBDA << \" \";\n //LAMBDA_file << endl;\n PutRNGstate();\n if(!finite(*vec_LAMBDA))\n {\n Rprintf(\"Undefined real result. \\n\");\n Rprintf(\"Please set 'first.run' argument to 'FALSE' and call HSROC() again.\\n\");\n *break_point = 6;\n break;\n }\n // *****************************************************\n\n\n\n // *****************************************************\n // CONDITIONAL DISTRIBUTION OF beta\n // *****************************************************\n double vec_exp_beta = 0.0;\n GetRNGstate();\n vec_exp_beta = MH_algo_cpp(*total, rij, res_Yij, alpha_rep, exp(*beta_a), exp(*beta_b), exp(*vec_beta) );\n *vec_beta = log(vec_exp_beta) ;\n matrix_beta[ big_loop ] = *vec_beta;\n //beta_file << *vec_beta << \" \";\n //beta_file << endl;\n PutRNGstate();\n if(!finite(*vec_beta))\n {\n Rprintf(\"Undefined real result. 
\\n\");\n Rprintf(\"Please set 'first.run' argument to 'FALSE' and call HSROC() again.\\n\");\n *break_point = 10;\n break;\n } \n // *****************************************************\n\n\n // *****************************************************\n // CONDITIONAL DISTRIBUTION OF CAPITAL_THETA\n // *****************************************************\n GetRNGstate();\n *vec_CTHETA = Truncnorm2(mean(*n_studies,vec_theta), *vec_sigma_theta/sqrt(*n_studies), *CTHETA_lower, *CTHETA_upper ) ;\n matrix_CTHETA[ big_loop ] = *vec_CTHETA;\n //CTHETA_file << *vec_CTHETA << \" \";\n //CTHETA_file << endl;\n PutRNGstate();\n if(!finite(*vec_CTHETA))\n {\n Rprintf(\"Undefined real result. \\n\");\n Rprintf(\"Please set 'first.run' argument to 'FALSE' and call HSROC() again.\\n\");\n *break_point = 8;\n break;\n }\n // *****************************************************\n\n\n\n // *****************************************************\n // CONDITIONAL DISTRIBUTION OF sigma_alpha\n // *****************************************************\n if(*prior_sd_alpha == 1) // 1 = \"sd\"\n {\n double prec_alpha_shape = 0.0;\n double prec_alpha_scale = 0.0;\n double prec_alpha = 0.0;\n\n prec_alpha_shape = (*n_studies/2.0) - 0.5;\n prec_alpha_scale = summation(*n_studies, vec_alpha, *vec_LAMBDA);\n GetRNGstate();\n prec_alpha = truncgamma_cpp(prec_alpha_shape, prec_alpha_scale, *low_sd_alpha, *up_sd_alpha);\n PutRNGstate();\n *vec_sigma_alpha = sqrt(1.0/prec_alpha);\n matrix_sd_alpha[ big_loop ] = *vec_sigma_alpha;\n //sd_alpha_file << *vec_sigma_alpha << \" \";\n //sd_alpha_file << endl;\n if(!finite(*vec_sigma_alpha))\n {\n Rprintf(\"Undefined real result. \\n\");\n Rprintf(\"Please set 'first.run' argument to 'FALSE' and call HSROC() again.\\n\");\n *break_point = 7;\n break;\n }\n }\n else if(*prior_sd_alpha == 2) // 2 = \"v\"\n {\n double prec_alpha_shape = 0.0;\n double prec_alpha_scale = 0.0;\n double prec_alpha = 0.0;\n\n prec_alpha_shape = (*n_studies/2.0) - 1.0;\n prec_alpha_scale = summation(*n_studies, vec_alpha, *vec_LAMBDA);\n GetRNGstate();\n prec_alpha = truncgamma_cpp(prec_alpha_shape, prec_alpha_scale, *low_sd_alpha, *up_sd_alpha);\n PutRNGstate();\n *vec_sigma_alpha = sqrt(1.0/prec_alpha);\n matrix_sd_alpha[ big_loop ] = *vec_sigma_alpha;\n //sd_alpha_file << *vec_sigma_alpha << \" \";\n //sd_alpha_file << endl;\n if(!finite(*vec_sigma_alpha))\n {\n Rprintf(\"Undefined real result. \\n\");\n Rprintf(\"Please set 'first.run' argument to 'FALSE' and call HSROC() again.\\n\");\n *break_point = 7;\n break;\n }\n }\n else if(*prior_sd_alpha == 3) // 3 = \"p\"\n {\n double prec_alpha_shape = 0.0;\n double prec_alpha_scale = 0.0;\n double prec_alpha = 0.0;\n\n prec_alpha_shape = (*n_studies/2.0) + *low_sd_alpha;\n prec_alpha_scale = summation2(*n_studies, vec_alpha, *vec_LAMBDA, *up_sd_alpha);\n GetRNGstate();\n prec_alpha = rgamma( prec_alpha_shape, prec_alpha_scale ) ;\n PutRNGstate();\n *vec_sigma_alpha = sqrt(1.0/prec_alpha);\n matrix_sd_alpha[ big_loop ] = *vec_sigma_alpha;\n //sd_alpha_file << *vec_sigma_alpha << \" \";\n //sd_alpha_file << endl;\n if(!finite(*vec_sigma_alpha))\n {\n Rprintf(\"Undefined real result. 
\\n\");\n Rprintf(\"Please set 'first.run' argument to 'FALSE' and call HSROC() again.\\n\");\n *break_point = 7;\n break;\n }\n }\n\n // *****************************************************\n\n\n\n\n\n\n // *****************************************************\n // CONDITIONAL DISTRIBUTION OF sigma_theta\n // *****************************************************\n if(*prior_sd_theta == 1) // 1 = \"sd\"\n {\n double prec_theta_shape = 0.0;\n double prec_theta_scale = 0.0;\n double prec_theta = 0.0;\n\n prec_theta_shape = (*n_studies/2.0) - 0.5;\n prec_theta_scale = summation(*n_studies, vec_theta, *vec_CTHETA);\n GetRNGstate();\n prec_theta = truncgamma_cpp(prec_theta_shape, prec_theta_scale, *low_sd_theta, *up_sd_theta);\n PutRNGstate();\n *vec_sigma_theta = sqrt(1.0/prec_theta);\n matrix_sd_theta[ big_loop ] = *vec_sigma_theta;\n //sd_theta_file << *vec_sigma_theta << \" \";\n //sd_theta_file << endl;\n if(!finite(*vec_sigma_theta))\n {\n Rprintf(\"Undefined real result. \\n\");\n Rprintf(\"Please set 'first.run' argument to 'FALSE' and call HSROC() again.\\n\");\n *break_point = 9;\n break;\n }\n }\n if(*prior_sd_theta == 2) // 2 = \"v\"\n {\n double prec_theta_shape = 0.0;\n double prec_theta_scale = 0.0;\n double prec_theta = 0.0;\n\n prec_theta_shape = (*n_studies/2.0) - 1.0;\n prec_theta_scale = summation(*n_studies, vec_theta, *vec_CTHETA);\n GetRNGstate();\n prec_theta = truncgamma_cpp(prec_theta_shape, prec_theta_scale, *low_sd_theta, *up_sd_theta);\n PutRNGstate();\n *vec_sigma_theta = sqrt(1.0/prec_theta);\n matrix_sd_theta[ big_loop ] = *vec_sigma_theta;\n //sd_theta_file << *vec_sigma_theta << \" \";\n //sd_theta_file << endl;\n if(!finite(*vec_sigma_theta))\n {\n Rprintf(\"Undefined real result. \\n\");\n Rprintf(\"Please set 'first.run' argument to 'FALSE' and call HSROC() again.\\n\");\n *break_point = 9;\n break;\n }\n }\n if(*prior_sd_theta == 3) // 3 = \"p\"\n {\n double prec_theta_shape = 0.0;\n double prec_theta_scale = 0.0;\n double prec_theta = 0.0;\n\n prec_theta_shape = (*n_studies/2.0) + *low_sd_theta;\n prec_theta_scale = summation2(*n_studies, vec_theta, *vec_CTHETA, *up_sd_theta );\n GetRNGstate();\n prec_theta = rgamma( prec_theta_shape, prec_theta_scale ) ;\n PutRNGstate();\n *vec_sigma_theta = sqrt(1.0/prec_theta);\n matrix_sd_theta[ big_loop ] = *vec_sigma_theta;\n //sd_theta_file << *vec_sigma_theta << \" \";\n //sd_theta_file << endl;\n if(!finite(*vec_sigma_theta))\n {\n Rprintf(\"Undefined real result. \\n\");\n Rprintf(\"Please set 'first.run' argument to 'FALSE' and call HSROC() again.\\n\");\n *break_point = 9;\n break;\n }\n }\n // *****************************************************\n\n\n\n // *****************************************************\n // CONDITIONAL DISTRIBUTION OF S1 AND C1 AND Pooled S and Pooled C\n // *****************************************************\n GetRNGstate();\n for(int i15=0; i15<*n_studies; i15++)\n {\n vec_S1[i15] = 1.0-pnorm( exp((-(*vec_beta))/2.0)*(vec_theta[i15] - (vec_alpha[i15]/2.0)), 0, 1, 1, 0 );\n matrix_S1[(big_loop*(*n_studies)) + i15] = vec_S1[i15];\n //S1_file << vec_S1[i15] << \" \";\n if(!finite(vec_S1[i15]))\n {\n Rprintf(\"Undefined real result. 
\\n\");\n Rprintf(\"Please set 'first.run' argument to 'FALSE' and call HSROC() again.\\n\");\n condition = 0;\n *break_point = 3;\n break;\n }\n } \n if(condition == 0) {break;} //break the outer loop because of undefined real results\n //S1_file << endl;\n PutRNGstate();\n GetRNGstate();\n for(int i16=0; i16<*n_studies; i16++)\n {\n vec_C1[i16] = pnorm( exp(*vec_beta/2.0)*(vec_theta[i16] + (vec_alpha[i16]/2.0)), 0, 1, 1, 0 );\n matrix_C1[(big_loop*(*n_studies)) + i16] = vec_C1[i16];\n //C1_file << vec_C1[i16] << \" \";\n if(!finite(vec_C1[i16]))\n {\n Rprintf(\"Undefined real result. \\n\");\n Rprintf(\"Please set 'first.run' argument to 'FALSE' and call HSROC() again.\\n\");\n condition = 0;\n *break_point = 4;\n break;\n }\n\n }\n if(condition == 0) {break;} //break the outer loop because of undefined real results\n //C1_file << endl;\n PutRNGstate();\n GetRNGstate();\n double C_pool = pnorm( exp(*vec_beta/2.0)*(*vec_CTHETA + (*vec_LAMBDA/2.0)), 0, 1, 1, 0 );\n matrix_pool_C[ big_loop ] = C_pool;\n PutRNGstate();\n //pool_C_file << C_pool << \" \";\n //pool_C_file << endl;\n GetRNGstate();\n double S_pool = 1.0-pnorm( exp(-(*vec_beta)/2.0)*(*vec_CTHETA - (*vec_LAMBDA/2.0)), 0, 1, 1, 0 );\n matrix_pool_S[ big_loop ] = S_pool;\n PutRNGstate();\n //pool_S_file << S_pool << \" \";\n //pool_S_file << endl;\n // *****************************************************\n\n\n // *****************************************************\n // CALCULATION OF POSTERIOR PREDICTIVE VALUES FOR A NEW STUDY THAT HAS NOT YET TAKEN PLACE\n // *****************************************************\n GetRNGstate();\n double beta_new = runif(*beta_a, *beta_b) ;\n PutRNGstate();\n GetRNGstate();\n double theta_new = rnorm(*vec_CTHETA, *vec_sigma_theta) ;\n PutRNGstate();\n GetRNGstate();\n double alpha_new = rnorm(*vec_LAMBDA, *vec_sigma_alpha) ;\n PutRNGstate();\n GetRNGstate();\n double Sens_new = 1.0 - pnorm( exp(-(beta_new)/2.0)*(theta_new - (alpha_new/2.0)), 0, 1, 1, 0 ) ;\n PutRNGstate();\n matrix_SNEW[ big_loop ] = Sens_new;\n //SNEW_file << Sens_new << \" \";\n //SNEW_file << endl;\n GetRNGstate();\n double Spec_new = pnorm( exp(beta_new/2.0)*(theta_new + (alpha_new/2.0)), 0, 1, 1, 0 ) ;\n PutRNGstate();\n matrix_CNEW[ big_loop ] = Spec_new;\n //CNEW_file << Spec_new << \" \";\n //CNEW_file << endl;\n\n // *****************************************************\n//\n\n\n\n // RE-INITIALIZE SOME OF THE VARIABLES TO MAKE SURE THEY DO NOT ADD UP NUMBERS FROM PREVIOUS ITERATIONS\n for(int reinit=0; reinit<*n_studies; reinit++)\n {\n resY1[reinit] = 0.0;\n resY2[reinit] = 0.0;\n resY3[reinit] = 0.0;\n resY4[reinit] = 0.0;\n resPI_a[reinit] = 0.0;\n resPI_b[reinit] = 0.0;\n A[reinit] = 0;\n B[reinit] = 0;\n }\n for(int reinit_4=0; reinit_4<*total; reinit_4++)\n {\n res_probYij[reinit_4] = 0.0;\n res_Yij[reinit_4] = 0.0;\n alpha_rep[reinit_4] = 0;\n theta_rep[reinit_4] = 0;\n mu[reinit_4] = 0;\n sd[reinit_4] = 0;\n rij[reinit_4] = 0;\n }\n\n\n\n\n\t}//big_loop\n\t\n\t\n\tfwrite(matrix_PI, sizeof(matrix_PI[0]), (*iter)*(*n_studies), PI_file);\n\tfwrite(matrix_S2, sizeof(matrix_S2[0]), (*iter)*(*refstd), S2_file);\n\tfwrite(matrix_C2, sizeof(matrix_C2[0]), (*iter)*(*refstd), C2_file);\n\tfwrite(matrix_theta, sizeof(matrix_theta[0]), (*iter)*(*n_studies), theta_file);\n\tfwrite(matrix_alpha, sizeof(matrix_alpha[0]), (*iter)*(*n_studies), alpha_file);\n\tfwrite(matrix_LAMBDA, sizeof(matrix_LAMBDA[0]), *iter, LAMBDA_file);\n\tfwrite(matrix_beta, sizeof(matrix_beta[0]), *iter, beta_file);\n\tfwrite(matrix_CTHETA, 
sizeof(matrix_CTHETA[0]), *iter, CTHETA_file);\n\tfwrite(matrix_sd_alpha, sizeof(matrix_sd_alpha[0]), *iter, sd_alpha_file);\n\tfwrite(matrix_sd_theta, sizeof(matrix_sd_theta[0]), *iter, sd_theta_file);\n\tfwrite(matrix_S1, sizeof(matrix_S1[0]), (*iter)*(*n_studies), S1_file);\n\tfwrite(matrix_C1, sizeof(matrix_C2[0]), (*iter)*(*n_studies), C1_file);\n\tfwrite(matrix_pool_C, sizeof(matrix_pool_C[0]), *iter, pool_C_file);\n\tfwrite(matrix_pool_S, sizeof(matrix_pool_S[0]), *iter, pool_S_file);\n\tfwrite(matrix_SNEW, sizeof(matrix_SNEW[0]), *iter, SNEW_file);\n\tfwrite(matrix_CNEW, sizeof(matrix_CNEW[0]), *iter, CNEW_file);\n\t\n\tfclose(PI_file);\n\tfclose(S2_file);\n\tfclose(C2_file);\n\tfclose(theta_file);\n\tfclose(alpha_file);\n\tfclose(LAMBDA_file);\n\tfclose(beta_file);\n\tfclose(CTHETA_file);\n\tfclose(sd_alpha_file);\n\tfclose(sd_theta_file);\n\tfclose(S1_file);\n\tfclose(C1_file);\n\tfclose(pool_C_file);\n\tfclose(pool_S_file);\n\tfclose(SNEW_file);\n\tfclose(CNEW_file);\n\n\n \n }\n\n}\n\n\n" }, { "alpha_fraction": 0.6931982636451721, "alphanum_fraction": 0.714078962802887, "avg_line_length": 69.10144805908203, "blob_id": "d90ce8c93c6dd1e2eb5c772d4fe3aa0b35fa286e", "content_id": "2734502504f8c5d1d600216cb62d323bba424255", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4837, "license_type": "no_license", "max_line_length": 606, "num_lines": 69, "path": "/src/forms/ui_diagnostic_explain_dlg.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'diag_explain_form.ui'\n#\n# Created: Wed Apr 17 14:37:19 2013\n# by: PyQt4 UI code generator 4.10\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_diag_explain_window(object):\n def setupUi(self, diag_explain_window):\n diag_explain_window.setObjectName(_fromUtf8(\"diag_explain_window\"))\n diag_explain_window.resize(510, 239)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(_fromUtf8(\":/images/meta.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n diag_explain_window.setWindowIcon(icon)\n diag_explain_window.setModal(True)\n self.verticalLayout_2 = QtGui.QVBoxLayout(diag_explain_window)\n self.verticalLayout_2.setObjectName(_fromUtf8(\"verticalLayout_2\"))\n self.verticalLayout = QtGui.QVBoxLayout()\n self.verticalLayout.setObjectName(_fromUtf8(\"verticalLayout\"))\n self.textBrowser = QtGui.QTextBrowser(diag_explain_window)\n self.textBrowser.setObjectName(_fromUtf8(\"textBrowser\"))\n self.verticalLayout.addWidget(self.textBrowser)\n self.dont_show_again_chk_box = QtGui.QCheckBox(diag_explain_window)\n self.dont_show_again_chk_box.setObjectName(_fromUtf8(\"dont_show_again_chk_box\"))\n self.verticalLayout.addWidget(self.dont_show_again_chk_box)\n self.buttonBox = QtGui.QDialogButtonBox(diag_explain_window)\n self.buttonBox.setOrientation(QtCore.Qt.Horizontal)\n self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Ok)\n self.buttonBox.setObjectName(_fromUtf8(\"buttonBox\"))\n 
self.verticalLayout.addWidget(self.buttonBox)\n self.verticalLayout_2.addLayout(self.verticalLayout)\n\n self.retranslateUi(diag_explain_window)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\"accepted()\")), diag_explain_window.accept)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\"rejected()\")), diag_explain_window.reject)\n QtCore.QMetaObject.connectSlotsByName(diag_explain_window)\n\n def retranslateUi(self, diag_explain_window):\n diag_explain_window.setWindowTitle(_translate(\"diag_explain_window\", \"Diagnostic MA with Multiple Metrics\", None))\n self.textBrowser.setHtml(_translate(\"diag_explain_window\", \"<!DOCTYPE HTML PUBLIC \\\"-//W3C//DTD HTML 4.0//EN\\\" \\\"http://www.w3.org/TR/REC-html40/strict.dtd\\\">\\n\"\n\"<html><head><meta name=\\\"qrichtext\\\" content=\\\"1\\\" /><style type=\\\"text/css\\\">\\n\"\n\"p, li { white-space: pre-wrap; }\\n\"\n\"</style></head><body style=\\\" font-family:\\'Lucida Grande\\'; font-size:13pt; font-weight:400; font-style:normal;\\\">\\n\"\n\"<p style=\\\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\\\"><span style=\\\" font-family:\\'MS Shell Dlg 2\\'; font-size:14pt; font-weight:600;\\\">A note on diagnostic meta-analysis for multiple metrics</span></p>\\n\"\n\"<p style=\\\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\\'MS Shell Dlg 2\\'; font-size:14pt;\\\"><br /></p>\\n\"\n\"<p style=\\\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\\\"><span style=\\\" font-family:\\'MS Shell Dlg 2\\'; font-size:12pt;\\\">It looks like you\\'re conducting meta-analyses for multiple diagnostic metrics. </span></p>\\n\"\n\"<p style=\\\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\\'MS Shell Dlg 2\\'; font-size:12pt;\\\"><br /></p>\\n\"\n\"<p style=\\\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\\\"><span style=\\\" font-family:\\'MS Shell Dlg 2\\'; font-size:12pt;\\\">Due to statistical properties of these metrics, different methods are available for sensitivity and specificity than are for likelihood and diagnostic odds ratios. So now we\\'re going to first ask you to pick a meta-analysis method (and parameters) to use for sensitivity/specificity, and then we\\'re going to ask you to pick a method to use for likelihood and diagnostic odds ratios. 
</span></p></body></html>\", None))\n self.dont_show_again_chk_box.setText(_translate(\"diag_explain_window\", \"don\\'t show this message next time\", None))\n\nimport icons_rc\n" }, { "alpha_fraction": 0.6961827278137207, "alphanum_fraction": 0.7168335318565369, "avg_line_length": 46.68656539916992, "blob_id": "7e65f1c8c9d6c6d26941928db45a717ed4ac5678", "content_id": "d677e2f672f0291bc4727ff8695965e1410aafaa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3196, "license_type": "no_license", "max_line_length": 173, "num_lines": 67, "path": "/src/forms/ui_choose_metric_page.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'choose_metric_page.ui'\n#\n# Created: Thu Jun 27 10:21:34 2013\n# by: PyQt4 UI code generator 4.10.1\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_WizardPage(object):\n def setupUi(self, WizardPage):\n WizardPage.setObjectName(_fromUtf8(\"WizardPage\"))\n WizardPage.resize(400, 220)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(WizardPage.sizePolicy().hasHeightForWidth())\n WizardPage.setSizePolicy(sizePolicy)\n WizardPage.setMinimumSize(QtCore.QSize(400, 220))\n WizardPage.setMaximumSize(QtCore.QSize(400, 220))\n WizardPage.setSubTitle(_fromUtf8(\"\"))\n self.verticalLayout = QtGui.QVBoxLayout(WizardPage)\n self.verticalLayout.setObjectName(_fromUtf8(\"verticalLayout\"))\n self.label_2 = QtGui.QLabel(WizardPage)\n self.label_2.setWordWrap(True)\n self.label_2.setObjectName(_fromUtf8(\"label_2\"))\n self.verticalLayout.addWidget(self.label_2)\n self.horizontalLayout = QtGui.QHBoxLayout()\n self.horizontalLayout.setObjectName(_fromUtf8(\"horizontalLayout\"))\n self.label = QtGui.QLabel(WizardPage)\n self.label.setObjectName(_fromUtf8(\"label\"))\n self.horizontalLayout.addWidget(self.label)\n self.metric_cbo_box = QtGui.QComboBox(WizardPage)\n self.metric_cbo_box.setObjectName(_fromUtf8(\"metric_cbo_box\"))\n self.horizontalLayout.addWidget(self.metric_cbo_box)\n spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.horizontalLayout.addItem(spacerItem)\n self.verticalLayout.addLayout(self.horizontalLayout)\n spacerItem1 = QtGui.QSpacerItem(20, 46, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)\n self.verticalLayout.addItem(spacerItem1)\n\n self.retranslateUi(WizardPage)\n QtCore.QMetaObject.connectSlotsByName(WizardPage)\n\n def retranslateUi(self, WizardPage):\n WizardPage.setWindowTitle(_translate(\"WizardPage\", \"WizardPage\", None))\n WizardPage.setTitle(_translate(\"WizardPage\", \"Choose a metric...\", None))\n self.label_2.setText(_translate(\"WizardPage\", \"Please choose the appropriate metric for the data type you just selected or just accept the listed default.\\n\"\n\"\\n\"\n\"Note, however, that if you are importing data via a CSV and you have 
effect data, it is ESSENTIAL that you choose the proper metric corresponding to your data now.\", None))\n self.label.setText(_translate(\"WizardPage\", \"Metric:\", None))\n\n" }, { "alpha_fraction": 0.38348081707954407, "alphanum_fraction": 0.389380544424057, "avg_line_length": 14.142857551574707, "blob_id": "835add24fdee52f850ad0f0512902346b6697936", "content_id": "feb49d94308c48c52ac5240550d46abc54c46458", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 339, "license_type": "no_license", "max_line_length": 36, "num_lines": 21, "path": "/src/R/HSROC/R/truncunif.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "truncunif <-\r\nfunction (bornes, prior.l, prior.u) \r\n{\r\n l = bornes[1]\r\n u = bornes[2]\r\n ll = numeric()\r\n uu = numeric()\r\n if (l < prior.l) {\r\n ll = prior.l\r\n }\r\n else {\r\n ll = l\r\n }\r\n if (u > prior.u) {\r\n uu = prior.u\r\n }\r\n else {\r\n uu = u\r\n }\r\n return(cbind(ll, uu))\r\n}\r\n" }, { "alpha_fraction": 0.647341787815094, "alphanum_fraction": 0.6731645464897156, "avg_line_length": 42.406593322753906, "blob_id": "c18b73e3e5dee90359a48d68aec6db96ebebe0ae", "content_id": "34b2e93422ce3ea07decded0ad7cb053ee3fefbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3950, "license_type": "no_license", "max_line_length": 106, "num_lines": 91, "path": "/src/forms/ui_new_outcome.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'new_outcome_dlg.ui'\n#\n# Created: Wed Apr 17 14:37:20 2013\n# by: PyQt4 UI code generator 4.10\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_Dialog(object):\n def setupUi(self, Dialog):\n Dialog.setObjectName(_fromUtf8(\"Dialog\"))\n Dialog.setEnabled(True)\n Dialog.resize(301, 132)\n Dialog.setMinimumSize(QtCore.QSize(301, 132))\n Dialog.setMaximumSize(QtCore.QSize(301, 132))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n Dialog.setFont(font)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(_fromUtf8(\":/images/meta.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n Dialog.setWindowIcon(icon)\n self.buttonBox = QtGui.QDialogButtonBox(Dialog)\n self.buttonBox.setGeometry(QtCore.QRect(10, 90, 281, 32))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.buttonBox.setFont(font)\n self.buttonBox.setOrientation(QtCore.Qt.Horizontal)\n self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)\n self.buttonBox.setObjectName(_fromUtf8(\"buttonBox\"))\n self.layoutWidget = QtGui.QWidget(Dialog)\n self.layoutWidget.setGeometry(QtCore.QRect(10, 10, 281, 71))\n self.layoutWidget.setObjectName(_fromUtf8(\"layoutWidget\"))\n self.gridLayout = QtGui.QGridLayout(self.layoutWidget)\n self.gridLayout.setMargin(0)\n self.gridLayout.setObjectName(_fromUtf8(\"gridLayout\"))\n self.label_2 = QtGui.QLabel(self.layoutWidget)\n font = QtGui.QFont()\n 
font.setFamily(_fromUtf8(\"Verdana\"))\n font.setPointSize(10)\n self.label_2.setFont(font)\n self.label_2.setObjectName(_fromUtf8(\"label_2\"))\n self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)\n self.outcome_name_le = QtGui.QLineEdit(self.layoutWidget)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.outcome_name_le.setFont(font)\n self.outcome_name_le.setAlignment(QtCore.Qt.AlignCenter)\n self.outcome_name_le.setObjectName(_fromUtf8(\"outcome_name_le\"))\n self.gridLayout.addWidget(self.outcome_name_le, 0, 1, 1, 1)\n self.label = QtGui.QLabel(self.layoutWidget)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n font.setPointSize(10)\n self.label.setFont(font)\n self.label.setObjectName(_fromUtf8(\"label\"))\n self.gridLayout.addWidget(self.label, 1, 0, 1, 1)\n self.datatype_cbo_box = QtGui.QComboBox(self.layoutWidget)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.datatype_cbo_box.setFont(font)\n self.datatype_cbo_box.setObjectName(_fromUtf8(\"datatype_cbo_box\"))\n self.gridLayout.addWidget(self.datatype_cbo_box, 1, 1, 1, 1)\n\n self.retranslateUi(Dialog)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\"accepted()\")), Dialog.accept)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\"rejected()\")), Dialog.reject)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n\n def retranslateUi(self, Dialog):\n Dialog.setWindowTitle(_translate(\"Dialog\", \"add new outcome\", None))\n self.label_2.setText(_translate(\"Dialog\", \"outcome name:\", None))\n self.label.setText(_translate(\"Dialog\", \"type of outcome:\", None))\n\nimport icons_rc\n" }, { "alpha_fraction": 0.5571955442428589, "alphanum_fraction": 0.5608856081962585, "avg_line_length": 27.526315689086914, "blob_id": "7ad40f53b8d0a8cb2ab367869636220abadabbf8", "content_id": "ad6617e154dc9441f5d065faf1e8eeb3447b6d2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 544, "license_type": "no_license", "max_line_length": 71, "num_lines": 19, "path": "/src/ma_text_edit.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "import sys\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nimport pdb\n\nclass MATextEdit(QTextEdit):\n def __init__(self, *args):\n QTextEdit.__init__(self, *args)\n\n def event(self, event):\n if event.type()==QEvent.KeyPress:\n if event.key()==Qt.Key_Enter or event.key()==Qt.Key_Return:\n self.emit(SIGNAL(\"returnPressed\"))\n return True\n elif event.key()==Qt.Key_Up:\n print \"up??\"\n return True\n\n return QTextEdit.event(self, event)\n" }, { "alpha_fraction": 0.32236841320991516, "alphanum_fraction": 0.3815789520740509, "avg_line_length": 19.714284896850586, "blob_id": "c9fbfa99d3337e0d85647361abf59ca11bde5a28", "content_id": "41454d7b6d6228bd24f7c8b8f777c36436b76803", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 152, "license_type": "no_license", "max_line_length": 65, "num_lines": 7, "path": "/src/R/HSROC/R/B.fonction.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "B.fonction <-\r\nfunction (a, b) \r\n{\r\n result = sum((a[, 3] * a[, 1]^2 + a[, 4] * a[, 2]^2)/exp(b * \r\n (a[, 1] - 0.5)))\r\n return(result)\r\n}\r\n" }, { "alpha_fraction": 0.3598971664905548, "alphanum_fraction": 0.37789201736450195, "avg_line_length": 18.473684310913086, "blob_id": "b004b55e37cfb4040e6a99e51fb8397b76e58bb9", "content_id": 
"5f65c5653567a10af63a5a21a90022698b7236da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 389, "license_type": "no_license", "max_line_length": 59, "num_lines": 19, "path": "/src/R/HSROC/R/truncgamma.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "truncgamma <-\r\nfunction (n = 1, shape, scale, l, u) \r\n{\r\n l1 <- pgamma(l, shape, scale = scale)\r\n u1 <- pgamma(u, shape, scale = scale)\r\n x <- runif(n, l1, u1)\r\n if (x == 0) {\r\n y = u\r\n }\r\n else {\r\n if (x == 1) {\r\n y = l\r\n }\r\n else {\r\n y = qgamma(p = x, shape = shape, scale = scale)\r\n }\r\n }\r\n return(y)\r\n}\r\n" }, { "alpha_fraction": 0.41605839133262634, "alphanum_fraction": 0.4489051103591919, "avg_line_length": 22.909090042114258, "blob_id": "5f386de23b8428f5b9bbb4237deb50c25deb261e", "content_id": "24f5a3dbe0640f99c3c42a272b3aec591ea9b484", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 274, "license_type": "no_license", "max_line_length": 63, "num_lines": 11, "path": "/src/R/HSROC/R/DATA.organizer.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "DATA.organizer <-\r\nfunction (d, m) \r\n{\r\n n = rowSums(d)\r\n all.studies = c()\r\n for (i in 1:m) {\r\n all.studies = c(all.studies, c(rep(1, d[i, 1]), rep(2, \r\n d[i, 2]), rep(3, d[i, 3]), rep(0, d[i, 4])))\r\n }\r\n results = list(n, all.studies)\r\n}\r\n" }, { "alpha_fraction": 0.6389032006263733, "alphanum_fraction": 0.645548403263092, "avg_line_length": 38.74615478515625, "blob_id": "fe82f02991fde7c4645068cdd0a3df06a6fa98b4", "content_id": "598b50c1a0be58ea2cfe5d24ca228cdd4c9dfa34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 15500, "license_type": "no_license", "max_line_length": 165, "num_lines": 390, "path": "/src/R/openmetar/R/continuous_methods.r", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "####################################\n# OpenMeta[Analyst] #\n# ---- #\n# continuous_methods.r # \n# Facade module; wraps methods #\n# that perform analyses on # \n# continuous data in a coherent #\n# interface. #\n####################################\n\nlibrary(metafor)\n\ncontinuous.two.arm.metrics <- c(\"MD\", \"SMD\")\ncontinuous.one.arm.metrics <- c(\"TXMean\")\n\ncompute.for.one.cont.study <- function(cont.data, params) {\n n1i <- cont.data@N1\n n2i <- cont.data@N2\n m1i <- cont.data@mean1\n m2i <- cont.data@mean2\n sd1i <- cont.data@sd1\n sd2i <- cont.data@sd2\n res <- escalc(params$measure, n1i=n1i, n2i=n2i, m1i=m1i, m2i=m2i, sd1i=sd1i, sd2i=sd2i)\n res\n}\n\ncontinuous.transform.f <- function(metric.str) {\n display.scale <- function(x, ...) {\n x\n }\n\n calc.scale <- function(x, ...) 
{\n x\n }\n\n list(display.scale = display.scale, calc.scale = calc.scale)\n}\n\nget.res.for.one.cont.study <- function(cont.data, params){\n # this method can be called when there is only one study to\n # get the point estimate and lower/upper bounds.\n y<-NULL\n se<-NULL\n if (length(cont.data@y) == 0 || is.na(cont.data@y)) {\n res <- compute.for.one.cont.study(cont.data, params)\n y <- res$yi[1]\n se <- sqrt(res$vi[1])\n }\n else {\n y <- cont.data@y[1]\n se <- cont.data@SE[1]\n }\n # note: conf.level is given as, e.g., 95, rather than .95.\n alpha <- 1.0-(params$conf.level/100.0)\n mult <- abs(qnorm(alpha/2.0))\n ub <- y + mult*se\n lb <- y - mult*se\n # we make lists to comply with the get.overall method\n res <- list(\"b\"=c(y), \"ci.lb\"=lb, \"ci.ub\"=ub, \"se\"=se)\n res\n}\n\ncreate.cont.data.array <- function(cont.data, params, res) {\n # Extracts data from cont.data and puts it into an array for the the first summary display table.\n tx1.name <- \"tx A\"\n tx2.name <- \"tx B\"\n # TODO: these should be taken from the corresponding column labels in the GUI and passed in via params.\n digits.str <- paste(\"%.\", params$digits, \"f\", sep=\"\")\n effect.size.name <- pretty.metric.name(as.character(params$measure))\n # Caculate confidence intervals\n study.ci.bounds <- calc.ci.bounds(cont.data, params)\n y.disp <- continuous.transform.f(params$measure)$display.scale(cont.data@y)\n lb.disp <- continuous.transform.f(params$measure)$display.scale(study.ci.bounds$lb)\n ub.disp <- continuous.transform.f(params$measure)$display.scale(study.ci.bounds$ub)\n y <- sprintf(digits.str, y.disp)\n LL <- sprintf(digits.str, lb.disp)\n UL <- sprintf(digits.str, ub.disp)\n weights <- res$study.weights\n weights <- sprintf(digits.str, weights)\n weights <- format(weights, justify=\"right\")\n # Extract the data from cont.data and round\n N.txA <- format(cont.data@N1, justify=\"right\")\n mean.txA <- sprintf(digits.str, cont.data@mean1)\n sd.txA <- sprintf(digits.str, cont.data@sd1)\n if (params$measure %in% continuous.two.arm.metrics) {\n N.txB <- format(cont.data@N2, justify=\"right\")\n mean.txB <- sprintf(digits.str, cont.data@mean2)\n sd.txB <- sprintf(digits.str, cont.data@sd2)\n raw.data <- array(\n c(\"Study\", [email protected],\n paste(tx1.name, \" N\", sep=\"\"), N.txA,\n paste(tx1.name, \" Mean\", sep=\"\"), mean.txA,\n paste(tx1.name, \" SD\", sep=\"\"), sd.txA,\n paste(tx2.name, \" N\", sep=\"\"), N.txB,\n paste(tx2.name, \" Mean\", sep=\"\"), mean.txB,\n paste(tx2.name, \" SD\", sep=\"\"), sd.txB,\n effect.size.name, y, \"Lower\", LL, \"Upper\", UL, \"Weight\", weights),\n dim=c(length([email protected]) + 1, 11))\n } else if (params$measure %in% continuous.one.arm.metrics) {\n raw.data <- array(\n c(\"Study\", [email protected],\n paste(tx1.name, \" N\", sep=\"\"), N.txA,\n paste(tx1.name, \" Mean\", sep=\"\"), mean.txA,\n paste(tx1.name, \" SD\", sep=\"\"), sd.txA,\n effect.size.name, y, \"Lower\", LL, \"Upper\", UL, \"Weight\", weights),\n dim=c(length([email protected]) + 1, 8))\n }\n class(raw.data) <- \"summary.data\" \n return(raw.data)\n}\n\nwrite.cont.study.data.to.file <- function(cont.data, params, res, data.outpath) {\n # create data frame and write to csv\n effect.size.name <- pretty.metric.name(as.character(params$measure))\n # Caculate confidence intervals\n study.ci.bounds <- calc.ci.bounds(cont.data, params)\n y.disp <- continuous.transform.f(params$measure)$display.scale(cont.data@y)\n if (params$measure %in% continuous.two.arm.metrics) {\n study.data.df <- 
data.frame(\n \"study.names\"=paste([email protected], \" \", cont.data@years, sep=\"\"),\n \"N1\" = cont.data@N1,\n \"mean1\" = cont.data@mean1,\n \"sd1\" = cont.data@sd1,\n \"N2\" = cont.data@N2,\n \"mean2\" = cont.data@mean2,\n \"sd2\" = cont.data@sd1,\n \"Effect.size\" = continuous.transform.f(params$measure)$display.scale(cont.data@y),\n \"Lower.bound\" = continuous.transform.f(params$measure)$display.scale(study.ci.bounds$lb),\n \"Upper.bound\" = continuous.transform.f(params$measure)$display.scale(study.ci.bounds$ub),\n \"Weight\" = res$study.weights)\n } else if(params$measure %in% continuous.one.arm.metrics) {\n study.data.df <- data.frame(\n \"study.names\"=paste([email protected], \" \", cont.data@years, sep=\"\"),\n \"N1\" = cont.data@N1,\n \"mean1\" = cont.data@mean1,\n \"sd1\" = cont.data@sd1,\n \"Effect.size\" = continuous.transform.f(params$measure)$display.scale(cont.data@y),\n \"Lower.bound\" = continuous.transform.f(params$measure)$display.scale(study.ci.bounds$lb),\n \"Upper.bound\" = continuous.transform.f(params$measure)$display.scale(study.ci.bounds$ub),\n \"Weight\" = res$study.weights)\n }\n # Rename effect size column\n names(study.data.df)[names(study.data.df)==\"Effect.size\"] <- effect.size.name\n write.csv(study.data.df, file=data.outpath, append=FALSE, row.names=FALSE)\n}\n\n###############################\n# continuous fixed effects #\n###############################\ncontinuous.fixed <- function(cont.data, params){\n # assert that the argument is the correct type\n if (!(\"ContinuousData\" %in% class(cont.data))) stop(\"Continuous data expected.\")\n \n results <- NULL\n input.params <- params\n\n if (length([email protected]) == 1){\n # handle the case where only one study was passed in\n res <- get.res.for.one.cont.study(cont.data, params)\n # Package res for use by overall method.\n results <- list(\"Summary\"=res, \"res\"=res)\n }\n else {\n res<-rma.uni(\n yi=cont.data@y,\n sei=cont.data@SE,\n [email protected],\n method=\"FE\",\n level=params$conf.level,\n digits=params$digits)\n pure.res <- res\n\n\t\t# add weights\n\t\tres$weights <- weights(res)\n\t\tresults <- list(\"Summary\"=res)\n\t\t\n # Create forest plot and list to display summary of results\n metric.name <- pretty.metric.name(as.character(params$measure))\n model.title <- paste(\"Continuous Fixed-Effect Model\\n\\nMetric: \", metric.name, sep=\"\")\n summary.disp <- create.summary.disp(cont.data, params, res, model.title)\n #\n # generate forest plot\n #\n forest.path <- paste(params$fp_outpath, sep=\"\")\n plot.data <- create.plot.data.continuous(cont.data, params, res)\n changed.params <- plot.data$changed.params\n\t\t\n\t\tforest.plot.params.path <- \"\"\n\t\tif (is.null(params$supress.output) || !params$supress.output) {\n # list of changed params values\n params.changed.in.forest.plot <- forest.plot(forest.data=plot.data, outpath=forest.path)\n changed.params <- c(changed.params, params.changed.in.forest.plot)\n params[names(changed.params)] <- changed.params\n # dump the forest plot params to disk; return path to\n # this .Rdata for later use\n forest.plot.params.path <- save.data(cont.data, res, params, plot.data)\n\t\t}\n\n # Now we package the results in a dictionary (technically, a named\n # vector). In particular, there are two fields that must be returned;\n # a dictionary of images (mapping titles to image paths) and a list of texts\n # (mapping titles to pretty-printed text). 
In this case we have only one\n # of each.\n plot.params.paths <- c(\"Forest Plot\"=forest.plot.params.path)\n images <- c(\"Forest Plot\"=forest.path)\n plot.names <- c(\"forest plot\"=\"forest_plot\")\n\t\tpure.res$weights <- weights(res)\n results <- list(\n \"input_data\"=cont.data,\n \"input_params\"=input.params,\n \"images\"=images,\n \"Summary\"=capture.output.and.collapse(summary.disp),\n \"plot_names\"=plot.names,\n \"plot_params_paths\"=plot.params.paths,\n \"res\"=pure.res,\n \"res.info\"=continuous.fixed.value.info(),\n \"weights\"=weights(res))\n }\n\t\n # removing \n\t#references <- \"\" #\"Fixed Effects Inverse Variance: this is a placeholder for continuous fixed reference\"\n\t#results[[\"References\"]] <- references\n\t\n results\n}\n\ncontinuous.fixed.parameters <- function(){\n # parameters\n params <- list(\"conf.level\"=\"float\", \"digits\"=\"int\")\n\n # default values\n defaults <- list(\"conf.level\"=95, \"digits\"=3)\n\n var_order <- c(\"conf.level\", \"digits\")\n parameters <- list(\"parameters\"=params, \"defaults\"=defaults, \"var_order\"=var_order)\n}\n\ncontinuous.fixed.pretty.names <- function() {\n pretty.names <- list(\n \"pretty.name\"=\"Continuous Fixed-Effect Inverse Variance\",\n \"description\" = \"Performs fixed-effect meta-analysis with inverse variance weighting.\",\n \"conf.level\"=list(\"pretty.name\"=\"Confidence level\", \"description\"=\"Level at which to compute confidence intervals\"),\n \"digits\"=list(\"pretty.name\"=\"Number of digits\", \"description\"=\"Number of digits to display in results\"),\n \"adjust\"=list(\"pretty.name\"=\"Correction factor\", \"description\"=\"Constant c that is added to the entries of a two-by-two table.\"),\n \"to\"=list(\"pretty.name\"=\"Add correction factor to\", \"description\"=\"When Add correction factor is set to \\\"only 0\\\", the correction factor\n is added to all cells of each two-by-two table that contains at leason one zero. 
When set to \\\"all\\\", the correction factor\n is added to all two-by-two tables if at least one table contains a zero.\")\n )\n}\n\ncontinuous.fixed.overall <- function(results){\n # this parses out the overall from the computed result\n res <- results$res\n}\n\n###############################\n# continuous random effects #\n###############################\ncontinuous.random <- function(cont.data, params) {\n # assert that the argument is the correct type\n if (!(\"ContinuousData\" %in% class(cont.data)))\n stop(\"Continuous data expected.\")\n\n results <- NULL\n\tinput.params <- params\n\t\n if (length([email protected]) == 1) {\n # handle the case where only one study was passed in\n res <- get.res.for.one.cont.study(cont.data, params)\n # Package res for use by overall method.\n results <- list(\"Summary\"=res, \"res\"=res)\n }\n else {\n res<-rma.uni(\n yi=cont.data@y, sei=cont.data@SE,\n [email protected],\n method=params$rm.method,\n level=params$conf.level,\n digits=params$digits)\n pure.res<-res\n \n\t\t# add weights\n\t\tres$weights <- weights(res)\n results <- list(\"Summary\"=res)\n\n # Create forest plot and list to display summary of results\n metric.name <- pretty.metric.name(as.character(params$measure))\n model.title <- paste(\"Continuous Random-Effects Model\\n\\nMetric: \", metric.name, sep=\"\")\n summary.disp <- create.summary.disp(cont.data, params, res, model.title)\n\n #### Generate forest plot ####\n forest.path <- paste(params$fp_outpath, sep=\"\")\n plot.data <- create.plot.data.continuous(cont.data, params, res)\n changed.params <- plot.data$changed.params\n\t\t\n\t\tforest.plot.params.path <- \"\"\n\t\tif (is.null(params$supress.output) || !params$supress.output) {\n # list of changed params values\n params.changed.in.forest.plot <- forest.plot(forest.data=plot.data, outpath=forest.path)\n changed.params <- c(changed.params, params.changed.in.forest.plot)\n params[names(changed.params)] <- changed.params\n # dump the forest plot params to disk; return path to\n # this .Rdata for later use\n forest.plot.params.path <- save.data(cont.data, res, params, plot.data)\n\t\t}\n\t\t\n # Now we package the results in a dictionary (technically, a named\n # vector). In particular, there are two fields that must be returned;\n # a dictionary of images (mapping titles to image paths) and a list of texts\n # (mapping titles to pretty-printed text). In this case we have only one\n # of each.\n plot.params.paths <- c(\"Forest Plot\"=forest.plot.params.path)\n images <- c(\"Forest Plot\"=forest.path)\n plot.names <- c(\"forest plot\"=\"forest_plot\")\n pure.res$weights <- weights(res)\n results <- list(\n \"input_data\"=cont.data,\n \"input_params\"=input.params,\n \"images\"=images,\n \"Summary\"=capture.output.and.collapse(summary.disp),\n \"plot_names\"=plot.names,\n \"plot_params_paths\"=plot.params.paths,\n \"res\"=pure.res,\n \"res.info\"=continuous.random.value.info(),\n \"weights\"=weights(res))\n }\n\t\n # possible reference below; for now just removing the 'references' section. we may want to revisit this at some point.\n\t#references <- \"Random effects: DerSimonian and Laird. Meta-analysis in clinical trials. Controlled Clinical Trials. 
7(3) (1986): 177-188.\"\n\t#results[[\"References\"]] <- references\n\t\n results\n}\n\ncontinuous.random.value.info <- function() {\n rma.uni.value.info()\n}\n\ncontinuous.fixed.value.info <- function() {\n rma.uni.value.info()\n}\n\n\ncontinuous.random.parameters <- function() {\n # parameters\n rm_method_ls <- c(\"HE\", \"DL\", \"SJ\", \"ML\", \"REML\", \"EB\")\n\n params <- list(\"rm.method\"=rm_method_ls, \"conf.level\"=\"float\", \"digits\"=\"int\")\n\n # default values\n defaults <- list(\"rm.method\"=\"DL\", \"conf.level\"=95, \"digits\"=3)\n\n var_order <- c(\"rm.method\", \"conf.level\", \"digits\")\n parameters <- list(\"parameters\"=params, \"defaults\"=defaults, \"var_order\"=var_order)\n}\n\ncontinuous.random.pretty.names <- function() {\n\t# sort of redundant to have both this and rm_method_ls but whatever for now...\n\trm_method_names <- list(\n HE = \"Hedges-Olkin\",\n DL = \"DerSimonian-Laird\",\n SJ = \"Sidik-Jonkman\",\n ML = \"Maximum Likelihood\",\n REML = \"Restricted Maximum Likelihood\",\n EB = \"Empirical Bayes\")\n\t\n pretty.names <- list(\n \"pretty.name\"=\"Continuous Random-Effects\",\n \"description\" = \"Performs random-effects meta-analysis.\",\n \"rm.method\"=list(\"pretty.name\"=\"Random-Effects method\", \"description\"=\"Method for estimating between-studies heterogeneity\", \"rm.method.names\"=rm_method_names),\n \"conf.level\"=list(\"pretty.name\"=\"Confidence level\", \"description\"=\"Level at which to compute confidence intervals\"),\n \"digits\"=list(\"pretty.name\"=\"Number of digits of precision to display\", \"description\"=\"Number of digits to display in results\"),\n \"adjust\"=list(\"pretty.name\"=\"Correction factor\", \"description\"=\"Constant c that is added to the entries of a two-by-two table.\"),\n \"to\"=list(\"pretty.name\"=\"Cells to which correction factor should be added\", \"description\"=\"When Add correction factor is set to \\\"only 0\\\", the correction factor\n is added to all cells of each two-by-two table that contains at leason one zero. When set to \\\"all\\\", the correction factor\n is added to all two-by-two tables if at least one table contains a zero.\")\n )\n}\n\ncontinuous.random.overall <- function(results){\n # this parses out the overall from the computed result\n res <- results$res\n}\n\ncontinuous.fixed.is.feasible.for.funnel <- function () {\n\tTRUE\n}\ncontinuous.random.is.feasible.for.funnel <- function () {\n\tTRUE\n}" }, { "alpha_fraction": 0.7011532187461853, "alphanum_fraction": 0.7248764634132385, "avg_line_length": 46.421875, "blob_id": "7fc1b74ac3ec64a7a5d1495a050e7ba26bd85af6", "content_id": "39145772fc448c20ebe3e8c52ebf05fe0d0ba8c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3035, "license_type": "no_license", "max_line_length": 112, "num_lines": 64, "path": "/src/forms/ui_change_cov_type.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'change_cov_type_form.ui'\n#\n# Created: Wed Apr 17 14:37:19 2013\n# by: PyQt4 UI code generator 4.10\n#\n# WARNING! 
All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_ChangeCovTypeForm(object):\n def setupUi(self, ChangeCovTypeForm):\n ChangeCovTypeForm.setObjectName(_fromUtf8(\"ChangeCovTypeForm\"))\n ChangeCovTypeForm.resize(484, 428)\n ChangeCovTypeForm.setMinimumSize(QtCore.QSize(400, 0))\n ChangeCovTypeForm.setMaximumSize(QtCore.QSize(100000, 555))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n ChangeCovTypeForm.setFont(font)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(_fromUtf8(\":/images/meta.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n ChangeCovTypeForm.setWindowIcon(icon)\n self.verticalLayout_2 = QtGui.QVBoxLayout(ChangeCovTypeForm)\n self.verticalLayout_2.setObjectName(_fromUtf8(\"verticalLayout_2\"))\n self.grp_box_preview = QtGui.QGroupBox(ChangeCovTypeForm)\n self.grp_box_preview.setObjectName(_fromUtf8(\"grp_box_preview\"))\n self.verticalLayout = QtGui.QVBoxLayout(self.grp_box_preview)\n self.verticalLayout.setObjectName(_fromUtf8(\"verticalLayout\"))\n self.cov_prev_table = QtGui.QTableView(self.grp_box_preview)\n self.cov_prev_table.setMaximumSize(QtCore.QSize(16777215, 16777215))\n self.cov_prev_table.setObjectName(_fromUtf8(\"cov_prev_table\"))\n self.verticalLayout.addWidget(self.cov_prev_table)\n self.verticalLayout_2.addWidget(self.grp_box_preview)\n self.buttonBox = QtGui.QDialogButtonBox(ChangeCovTypeForm)\n self.buttonBox.setOrientation(QtCore.Qt.Horizontal)\n self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)\n self.buttonBox.setObjectName(_fromUtf8(\"buttonBox\"))\n self.verticalLayout_2.addWidget(self.buttonBox)\n\n self.retranslateUi(ChangeCovTypeForm)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\"accepted()\")), ChangeCovTypeForm.accept)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\"rejected()\")), ChangeCovTypeForm.reject)\n QtCore.QMetaObject.connectSlotsByName(ChangeCovTypeForm)\n\n def retranslateUi(self, ChangeCovTypeForm):\n ChangeCovTypeForm.setWindowTitle(_translate(\"ChangeCovTypeForm\", \"Change Covariate Type\", None))\n self.grp_box_preview.setTitle(_translate(\"ChangeCovTypeForm\", \"values for new covariate\", None))\n\nimport icons_rc\n" }, { "alpha_fraction": 0.6742738485336304, "alphanum_fraction": 0.6919087171554565, "avg_line_length": 34.703704833984375, "blob_id": "c4e7aa1e5894d52ebd754a0f10966346494e3129", "content_id": "8fa43ff7a358f30e6e8bfc04f449c10b9e8f06f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1928, "license_type": "no_license", "max_line_length": 106, "num_lines": 54, "path": "/src/forms/ui_running.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'running.ui'\n#\n# Created: Wed Apr 17 14:37:20 2013\n# by: PyQt4 UI code generator 4.10\n#\n# WARNING! 
All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_running(object):\n def setupUi(self, running):\n running.setObjectName(_fromUtf8(\"running\"))\n running.setWindowModality(QtCore.Qt.ApplicationModal)\n running.resize(373, 70)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n running.setFont(font)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(_fromUtf8(\":/images/meta.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n running.setWindowIcon(icon)\n running.setSizeGripEnabled(False)\n running.setModal(True)\n self.verticalLayout = QtGui.QVBoxLayout(running)\n self.verticalLayout.setObjectName(_fromUtf8(\"verticalLayout\"))\n self.progress_bar = QtGui.QProgressBar(running)\n self.progress_bar.setMaximum(0)\n self.progress_bar.setProperty(\"value\", -1)\n self.progress_bar.setAlignment(QtCore.Qt.AlignCenter)\n self.progress_bar.setObjectName(_fromUtf8(\"progress_bar\"))\n self.verticalLayout.addWidget(self.progress_bar)\n\n self.retranslateUi(running)\n QtCore.QMetaObject.connectSlotsByName(running)\n\n def retranslateUi(self, running):\n running.setWindowTitle(_translate(\"running\", \"running analysis...\", None))\n\nimport icons_rc\n" }, { "alpha_fraction": 0.44861647486686707, "alphanum_fraction": 0.4620761275291443, "avg_line_length": 46.3547477722168, "blob_id": "06c3424734d8c3cadef0320199cab8d1b92d1a4b", "content_id": "0ffc76ebb9089e02d586ab87598e55d864693f69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 17311, "license_type": "no_license", "max_line_length": 167, "num_lines": 358, "path": "/src/R/HSROC/R/simdata.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "simdata <-\r\nfunction (N, n, n.random = FALSE, sub_rs = NULL, prev, se_ref = NULL, \r\n sp_ref = NULL, T, range.T = c(-Inf, Inf), L, range.L = c(-Inf, \r\n Inf), sd_t, sd_a, b, path = getwd()) \r\n{\r\n if (N < 1) {\r\n cat(\"Number of studies must be at least 1 or greater. \\n\")\r\n stop(\"Please respecify and call simdata() again.\\n\")\r\n }\r\n if (missing(N)) {\r\n cat(\"You must provide the number of studies 'N'. \\n\")\r\n stop(\"Please respecify and call simdata() again.\\n\")\r\n }\r\n if (missing(n)) {\r\n cat(\"You must provide the number of individuals 'n' within each study. \\n\")\r\n stop(\"Please respecify and call simdata() again.\\n\")\r\n }\r\n if (is.logical(n.random) == FALSE) {\r\n cat(\"The 'n.random' argument must be a logical object. \\n\")\r\n stop(\"Please respecify and call simdata() again.\\n\")\r\n }\r\n if (length(n) != N & length(n) != 1 & n.random == FALSE) {\r\n cat(\"You must provide the number of individuals 'n' for each study. 
\\n\")\r\n stop(\"Please respecify and call simdata() again.\\n\")\r\n }\r\n if (is.null(sub_rs) == TRUE) {\r\n n_rs = 1\r\n sub_rs = list(1, 1:N)\r\n }\r\n else {\r\n n_rs = sub_rs[[1]]\r\n }\r\n if (sub_rs[[1]] != (length(sub_rs) - 1)) {\r\n cat(paste(\"The value of the first element of 'sub_rs' (sub_rs[[1]] = \", \r\n sub_rs[[1]], \" ) does \\n\\t\\tnot match the number of remaining elements (length(sub_rs[[2:\", \r\n length(sub_rs), \"]])) = \", length(2:length(sub_rs)), \r\n \"\\n\", sep = \"\"))\r\n stop(\"Please respecify and call simdata() again.\\n\")\r\n }\r\n if (is.null(se_ref) == FALSE | is.null(sp_ref) == FALSE) {\r\n if ((length(se_ref) != sub_rs[[1]])) {\r\n cat(\"The number of reference standards in 'se_ref' and(or) \\n\\t\\t\\t\\t'sp_ref' is not matching the one defined in the \\n\\t\\t\\t\\t'sub_rs[[1]]' argument. \\n\")\r\n stop(\"Please respecify and call simdata() again.\\n\")\r\n }\r\n }\r\n if (is.null(sd_t) == TRUE) {\r\n SCO = TRUE\r\n }\r\n else {\r\n SCO = FALSE\r\n }\r\n if (length(prev) != 1 & length(prev) != N) \r\n stop(paste(\"You must provide M =\", N, \"elements for 'prev' argument\"), \r\n call. = FALSE)\r\n if (is.null(se_ref) != \"TRUE\" & length(se_ref) != sub_rs[[1]]) \r\n stop(\"Number of reference standards not matching the one defined for 'sub_rs' argument\", \r\n call. = FALSE)\r\n if (is.null(se_ref) != \"TRUE\" & length(sp_ref) != sub_rs[[1]]) \r\n stop(\"Number of reference standards not matching the one defined for 'sub_rs' argument\", \r\n call. = FALSE)\r\n if (missing(T)) \r\n stop(\"You must provide a value for parameter THETA ('T') \")\r\n if (missing(L)) \r\n stop(\"You must provide a value for parameter LAMBDA ('L')\")\r\n if (missing(sd_a)) \r\n stop(\"You must provide a value for parameter sd_alpha ('sd_a')\")\r\n if (missing(sd_t)) \r\n stop(\"You must provide a value for parameter sd_theta ('sd_t')\")\r\n if (missing(b)) \r\n stop(\"You must provide a value for parameter beta ('b')\")\r\n s2 = se_ref\r\n c2 = sp_ref\r\n pi = prev\r\n LOW_a = range.L[1]\r\n UP_a = range.L[2]\r\n LOW_t = range.T[1]\r\n UP_t = range.T[2]\r\n alpha = mapply(truncnorm2, rep(LOW_a, N), rep(UP_a, N), MoreArgs = list(L, \r\n sd_a, 1))\r\n if (SCO == FALSE) {\r\n theta = mapply(truncnorm2, rep(LOW_t, N), rep(UP_t, N), \r\n MoreArgs = list(T, sd_t, 1))\r\n }\r\n else {\r\n theta = rep(T, N)\r\n }\r\n if (is.null(s2) == \"TRUE\" & is.null(c2) == \"TRUE\") {\r\n Gold_Std = TRUE\r\n model = NULL\r\n }\r\n else {\r\n if (is.null(s2) == FALSE & is.null(c2) == FALSE) {\r\n Gold_Std = FALSE\r\n model = 1\r\n }\r\n }\r\n if (Gold_Std == TRUE) {\r\n se2 = sp2 = 1\r\n tp <- pnorm((theta - (alpha/2)) * exp(-b/2), mean = 0, \r\n sd = 1, lower.tail = FALSE)\r\n fp <- pnorm((theta + (alpha/2)) * exp(b/2), mean = 0, \r\n sd = 1, lower.tail = FALSE)\r\n TP_overall <- pnorm((T - (L/2)) * exp(-b/2), mean = 0, \r\n sd = 1, lower.tail = FALSE)\r\n FP_overall <- pnorm((T + (L/2)) * exp(b/2), mean = 0, \r\n sd = 1, lower.tail = FALSE)\r\n }\r\n else {\r\n if (Gold_Std == FALSE & model == 1) {\r\n se2 = sp2 = ni_rs = numeric()\r\n for (i in 1:n_rs) {\r\n ni_rs = c(ni_rs, length(sub_rs[[i + 1]]))\r\n se2 = c(se2, rep(s2[i], ni_rs[i]))\r\n sp2 = c(sp2, rep(c2[i], ni_rs[i]))\r\n }\r\n tp <- pnorm((theta - (alpha/2)) * exp(-b/2), mean = 0, \r\n sd = 1, lower.tail = FALSE)\r\n fp <- pnorm((theta + (alpha/2)) * exp(b/2), mean = 0, \r\n sd = 1, lower.tail = FALSE)\r\n TP_overall <- pnorm((T - (L/2)) * exp(-b/2), mean = 0, \r\n sd = 1, lower.tail = FALSE)\r\n FP_overall <- pnorm((T + 
(L/2)) * exp(b/2), mean = 0, \r\n sd = 1, lower.tail = FALSE)\r\n }\r\n }\r\n if (n.random == \"FALSE\" & length(n) == 1) {\r\n n = rep(n, N)\r\n }\r\n else {\r\n if (n.random == \"TRUE\" & length(n) >= 1) {\r\n n = sample(n, N, replace = TRUE)\r\n }\r\n }\r\n p1 = pi * tp * se2 + (1 - pi) * fp * (1 - sp2)\r\n p2 = pi * tp * (1 - se2) + (1 - pi) * fp * sp2\r\n p3 = pi * (1 - tp) * se2 + (1 - pi) * (1 - fp) * (1 - sp2)\r\n p4 = pi * (1 - tp) * (1 - se2) + (1 - pi) * (1 - fp) * sp2\r\n prob = cbind(p1, p2, p3, p4)\r\n data.sim = matrix(0, ncol = 4, nrow = N)\r\n colnames(data.sim) = c(\"++\", \"+-\", \"-+\", \"--\")\r\n for (i in 1:N) {\r\n data.sim[i, ] = rmultinom(n = 1, size = n[i], prob = prob[i, \r\n ])\r\n }\r\n file.TV = \"True_values.txt\"\r\n file.TV2 = \"True_values2.txt\"\r\n file.TV3 = \"True_REFSTD.txt\"\r\n file.TV.index = \"True_values_index.txt\"\r\n if (SCO == FALSE) {\r\n sim1 = cbind(alpha, theta, tp, (1 - fp), pi, data.sim[, \r\n 1], data.sim[, 2], data.sim[, 3], data.sim[, 4])\r\n write.table(sim1, file = file.TV, col.names = FALSE, \r\n row.names = FALSE)\r\n }\r\n else {\r\n if (SCO == TRUE) {\r\n sim1 = cbind(alpha, tp, (1 - fp), pi, data.sim[, \r\n 1:4])\r\n write.table(sim1, file = file.TV, col.names = FALSE, \r\n row.names = FALSE)\r\n }\r\n }\r\n if (SCO == FALSE) {\r\n if (Gold_Std == TRUE) {\r\n sim2 = c(T, sd_t, L, sd_a, b, TP_overall, (1 - FP_overall))\r\n write(sim2, file = file.TV2, ncolumns = 7)\r\n names(sim2) = c(\"THETA\", \"sigma theta\", \"LAMBDA\", \r\n \"sigma alpha\", \"beta\", \"Overal ++\", \"Overall --\")\r\n sim_rs = \"PERFECT\"\r\n }\r\n else {\r\n if (Gold_Std == FALSE & model == 1) {\r\n sim2 = c(T, sd_t, L, sd_a, b, TP_overall, (1 - \r\n FP_overall))\r\n write(sim2, file = file.TV2, ncolumns = 7)\r\n names(sim2) = c(\"THETA\", \"sigma theta\", \"LAMBDA\", \r\n \"sigma alpha\", \"beta\", \"Overal ++\", \"Overall --\")\r\n sim_rs = rbind(s2, c2)\r\n write.table(sim_rs, file = file.TV3, col.names = FALSE, \r\n row.names = FALSE)\r\n sim_rs_label = numeric()\r\n for (i in 1:n_rs) {\r\n sim_rs_label = c(sim_rs_label, i)\r\n }\r\n colnames(sim_rs) = sim_rs_label\r\n }\r\n }\r\n }\r\n else {\r\n if (SCO == TRUE) {\r\n if (Gold_Std == TRUE) {\r\n sim2 = c(T, L, sd_a, b, TP_overall, (1 - FP_overall))\r\n write(sim2, file = file.TV2, ncolumns = 6)\r\n names(sim2) = c(\"THETA\", \"LAMBDA\", \"sigma alpha\", \r\n \"beta\", \"Overal ++\", \"Overall --\")\r\n sim_rs = \"PERFECT\"\r\n }\r\n else {\r\n if (Gold_Std == FALSE & model == 1) {\r\n sim2 = c(T, L, sd_a, b, TP_overall, (1 - FP_overall))\r\n write(sim2, file = file.TV2, ncolumns = 6)\r\n names(sim2) = c(\"THETA\", \"LAMBDA\", \"sigma alpha\", \r\n \"beta\", \"Overal ++\", \"Overall --\")\r\n sim_rs = rbind(s2, c2)\r\n write.table(sim_rs, file = file.TV3, col.names = FALSE, \r\n row.names = FALSE)\r\n sim_rs_label = numeric()\r\n for (i in 1:n_rs) {\r\n sim_rs_label = c(sim_rs_label, i)\r\n }\r\n colnames(sim_rs) = sim_rs_label\r\n }\r\n }\r\n }\r\n }\r\n if (SCO == FALSE) {\r\n write(paste(\"______________________________________________________\"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"\\t True_values.txt \"), file = file.TV.index, \r\n append = TRUE)\r\n write(paste(\"Column 1 : alpha parameters for all M = \", \r\n N, \" study(ies)\\t \"), file = file.TV.index, append = TRUE)\r\n write(paste(\"Column 2 : theta parameters for all M = \", \r\n N, \" study(ies)\\t \"), file = file.TV.index, append = TRUE)\r\n write(paste(\"Column 3 : sensitivity of test under evaluation (S1) 
for all M = \", \r\n N, \" study(ies)\\t \"), file = file.TV.index, append = TRUE)\r\n write(paste(\"Column 4 : specificity of test under evaluation (C1) for all M = \", \r\n N, \" study(ies)\\t \"), file = file.TV.index, append = TRUE)\r\n write(paste(\"Column 5 : prevalence for all M = \", N, \r\n \" study(ies)\\t \"), file = file.TV.index, append = TRUE)\r\n write(paste(\"Column 6 : Observed cell with both test under evaluation and reference standard positive\\t \"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"Column 7 : Observed cell with test under evaluation positive and reference standard negative\\t \"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"Column 8 : Observed cell with test under evaluation negative and reference standard positive\\t \"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"Column 9 : Observed cell with both test under evaluation and reference standard negative\\t \"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"______________________________________________________\"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"\\t True_values2.txt \"), file = file.TV.index, \r\n append = TRUE)\r\n write(paste(\"Column 1 : THETA parameter\\t \"), file = file.TV.index, \r\n append = TRUE)\r\n write(paste(\"Column 2 : sigma theta parameter\\t \"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"Column 3 : LAMBDA parameter\\t \"), file = file.TV.index, \r\n append = TRUE)\r\n write(paste(\"Column 4 : sigma alpha parameter\\t \"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"Column 5 : beta parameter\\t \"), file = file.TV.index, \r\n append = TRUE)\r\n if (Gold_Std == TRUE) {\r\n write(paste(\"Column 6 : Overall sensitivity of test under evaluation (S1 overall) \\t \"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"Column 7 : Overall specificity of test under evaluation (C1 overall) \\t \"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"______________________________________________________\"), \r\n file = file.TV.index, append = TRUE)\r\n }\r\n else {\r\n if (Gold_Std == FALSE & model == 1) {\r\n write(paste(\"Column 6 : Overall sensitivity of test under evaluation (S1 overall) \\t \"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"Column 7 : Overall specificity of test under evaluation (C1 overall) \\t \"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"______________________________________________________\"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"\\t True_REFSTD.txt \"), file = file.TV.index, \r\n append = TRUE)\r\n write(paste(\"Row 1 : sensitivity of reference standard (S2) \\t \"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"Row 2 : specificity of reference standard (C2) \\t \"), \r\n file = file.TV.index, append = TRUE)\r\n }\r\n }\r\n }\r\n else {\r\n if (SCO == TRUE) {\r\n write(paste(\"______________________________________________________\"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"\\t True_values.txt \"), file = file.TV.index, \r\n append = TRUE)\r\n write(paste(\"Column 1 : alpha parameters for all M = \", \r\n N, \" study(ies)\\t \"), file = file.TV.index, \r\n append = TRUE)\r\n write(paste(\"Column 2 : sensitivity of test under evaluation (S1) for all M = \", \r\n N, \" study(ies)\\t \"), file = file.TV.index, \r\n append = TRUE)\r\n write(paste(\"Column 3 : specificity of test under evaluation (C1) for all M = \", \r\n N, \" study(ies)\\t \"), file = 
file.TV.index, \r\n append = TRUE)\r\n write(paste(\"Column 4 : prevalence for all M = \", \r\n N, \" study(ies)\\t \"), file = file.TV.index, \r\n append = TRUE)\r\n write(paste(\"Column 5 : Observed cell with both test under evaluation and reference standard positive\\t \"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"Column 6 : Observed cell with test under evaluation positive and reference standard negative\\t \"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"Column 7 : Observed cell with test under evaluation negative and reference standard positive\\t \"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"Column 8 : Observed cell with both test under evaluation and reference standard negative\\t \"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"______________________________________________________\"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"SAME CUT-OFF USED OVER ALL STUDIES\"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"______________________________________________________\"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"\\t True_values2.txt \"), file = file.TV.index, \r\n append = TRUE)\r\n write(paste(\"Column 1 : THETA parameter\\t \"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"Column 2 : LAMBDA parameter\\t \"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"Column 3 : sigma alpha parameter\\t \"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"Column 4 : beta parameter\\t \"), file = file.TV.index, \r\n append = TRUE)\r\n if (Gold_Std == TRUE) {\r\n write(paste(\"Column 5 : Overall sensitivity of test under evaluation (S1 overall) \\t \"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"Column 6 : Overall specificity of test under evaluation (C1 overall) \\t \"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"______________________________________________________\"), \r\n file = file.TV.index, append = TRUE)\r\n }\r\n else {\r\n if (Gold_Std == FALSE & model == 1) {\r\n write(paste(\"Column 5 : Overall sensitivity of test under evaluation (S1 overall) \\t \"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"Column 6 : Overall specificity of test under evaluation (C1 overall) \\t \"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"______________________________________________________\"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"\\t True_REFSTD.txt \"), file = file.TV.index, \r\n append = TRUE)\r\n write(paste(\"Row 1 : sensitivity of reference standard (S2) \\t \"), \r\n file = file.TV.index, append = TRUE)\r\n write(paste(\"Row 2 : specificity of reference standard (C2) \\t \"), \r\n file = file.TV.index, append = TRUE)\r\n }\r\n }\r\n }\r\n }\r\n if (N == 1) {\r\n ssim1 = rbind(sim1[, 1:5])\r\n colnames(ssim1) = c(\"alpha\", \"theta\", \"++\", \"--\", \"prev\")\r\n }\r\n else {\r\n ssim1 = sim1[, 1:5]\r\n colnames(ssim1) = c(\"alpha\", \"theta\", \"++\", \"--\", \"prev\")\r\n }\r\n sim.results = list(data.sim, ssim1, sim2, sim_rs)\r\n names(sim.results) = c(\"Data\", \"Whithin study parameters\", \r\n \"Between study parameters\", \"Reference standard\")\r\n return(sim.results)\r\n}\r\n" }, { "alpha_fraction": 0.5843037366867065, "alphanum_fraction": 0.5855785608291626, "avg_line_length": 43.784820556640625, "blob_id": "b81e359be31dab0946e85ff518987f752225fac8", "content_id": "d5cf511a4583960aa2178cc88d414e67d7149e4b", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 64321, "license_type": "no_license", "max_line_length": 147, "num_lines": 1436, "path": "/src/meta_form.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "######################################\n# # \n# Byron C. Wallace #\n# Tufts Medical Center # \n# OpenMeta[analyst] # \n# # \n# Container form for UI. Handles # \n# user interaction. # \n# # \n######################################\n\nimport pickle\nfrom PyQt4 import QtCore, QtGui\nfrom PyQt4.Qt import *\nimport copy\n\n## hand-rolled modules\nimport ui_meta\nimport ma_data_table_view\nimport ma_data_table_model\nimport meta_globals\nfrom meta_globals import *\nimport ma_dataset\nfrom settings import *\n\n# additional forms\nimport add_new_dialogs\nimport results_window\nimport ma_specs \nimport diag_metrics\nimport meta_reg_form\nimport meta_subgroup_form\nimport edit_dialog\nimport edit_group_name_form\nimport change_cov_type_form\nimport network_view\nimport conf_level_dialog\nimport main_wizard\nimport easter_egg\n\n# for the help\nimport webbrowser\n\n\n\nimport forms.ui_running\nclass ImportProgress(QDialog, forms.ui_running.Ui_running):\n def __init__(self, parent=None, min_=0, max_=10):\n super(ImportProgress, self).__init__(parent)\n self.setupUi(self)\n \n self.setWindowTitle(\"Importing from CSV...\")\n self.progress_bar.setRange(min_,max_)\n \n def setValue(self, value):\n if self.progress_bar.minimum() <= value <= self.progress_bar.maximum():\n self.progress_bar.setValue(value)\n \n def minimum(self):\n return self.progress_bar.minimum()\n def maximum(self):\n return self.progress_bar.maximum()\n def value(self):\n return self.progress_bar.value()\n \n###############################################################################\n\nclass MetaForm(QtGui.QMainWindow, ui_meta.Ui_MainWindow):\n\n def __init__(self, parent=None):\n # We follow the advice given by Mark Summerfield in his Python QT book: \n # Namely, we use multiple inheritance to gain access to the ui. We take\n # this approach throughout OpenMeta.\n super(MetaForm, self).__init__(parent)\n self.setupUi(self)\n \n # crazy number to indicate the conf level hasn't really been set yet\n self.cl_label=QLabel(\"confidence level: {:.1%}\".format(meta_globals.DEFAULT_CONF_LEVEL/2/100.0))\n self.cl_label.setAlignment(Qt.AlignRight)\n self.statusbar.addWidget(self.cl_label,1)\n \n\n # TODO should also allow a (path to a) dataset\n # to be given on the console.\n self.model = None\n self.new_dataset()\n\n # flag maintaining whether the current dataset\n # has been saved\n self.current_data_unsaved = False\n\n self.tableView.setModel(self.model)\n # attach a delegate for editing\n self.tableView.setItemDelegate(ma_data_table_view.StudyDelegate(self))\n\n # the nav_lbl text corresponds to the currently selected\n # 'dimension', e.g., outcome or treatment. 
New points\n # can then be added to this dimension, or it can be traveled\n # along using the horizontal nav arrows (the vertical arrows\n # navigate along the *dimensions*)\n self.dimensions =[\"outcome\", \"follow-up\", \"group\"]\n self.cur_dimension_index = 0\n self.update_dimension()\n self._setup_connections()\n self.tableView.setSelectionMode(QTableView.ContiguousSelection)\n self.model.reset()\n ## \n # we hand off a reference of the main gui to the table view\n # so that it can do things like pass suitable events 'up'\n # to the main form \n self.tableView.main_gui = self\n self.tableView.resizeColumnsToContents()\n \n\n self.out_path = None # path to output file\n self.metric_menu_is_set_for = None # BINARY, CONTINUOUS, or DIAGNOSTIC\n\n # by default, disable meta-regression (until we have covariates)\n self.action_meta_regression.setEnabled(False)\n \n load_settings()\n self.populate_open_recent_menu()\n \n # The most important code of the entire application\n show_tom = QAction(self)\n show_tom.setShortcut(QKeySequence(\"T, Shift+O, M\"))\n self.addAction(show_tom)\n QObject.connect(show_tom, SIGNAL(\"triggered()\"), self._show_tom)\n \n \n if DISABLE_NETWORK_STUFF:\n self.action_view_network.setEnabled(False)\n else:\n self.action_view_network.setEnabled(False)\n \n\n def start(self):\n # show the welcome dialog \n start_up_wizard = main_wizard.MainWizard(parent=self, \n recent_datasets=get_setting('recent_files'))\n \n if start_up_wizard.exec_():\n wizard_data = start_up_wizard.get_results()\n self._handle_wizard_results(wizard_data)\n else:\n QApplication.quit()\n \n def closeEvent(self, event):\n self.quit()\n\n\n def _model_about_to_be_reset(self):\n '''Call all the functions here that should be called when the model is\n about to be reset'''\n self._recalculate_display_scale_values()\n \n def _recalculate_display_scale_values(self):\n print(\"got to recalc disp scale values\")\n \n self.tableView.model().recalculate_display_scale()\n\n\n def create_new_dataset(self, use_undo_framework=True):\n if self.current_data_unsaved:\n choice = self.prompt_to_save_unsaved_data()\n if choice == QMessageBox.Yes:\n self.save()\n elif choice == QMessageBox.No:\n pass\n else: # cancel\n return\n \n wizard = main_wizard.MainWizard(parent=self, path=\"new_dataset\")\n if wizard.exec_():\n wizard_data = wizard.get_results()\n self._handle_wizard_results(wizard_data)\n \n def new_dataset(self, name=DEFAULT_DATASET_NAME, is_diag=False, use_undo_framework = True):\n \n data_model = ma_dataset.Dataset(title=name, is_diag=is_diag)\n if self.model is not None:\n if use_undo_framework:\n original_dataset = copy.deepcopy(self.model.dataset)\n old_state_dict = self.tableView.model().get_stateful_dict()\n undo_f = lambda : self.set_model(original_dataset, old_state_dict) \n redo_f = lambda : self.set_model(data_model)\n edit_command = meta_globals.CommandGenericDo(redo_f, undo_f)\n self.tableView.undoStack.push(edit_command)\n else: # not using undo framework (probably when importing csv (it will handle it internally)\n self.set_model(data_model)\n else:\n self.model = ma_data_table_model.DatasetModel(dataset=data_model)\n # no dataset; disable saving, editing, etc.\n self.disable_menu_options_that_require_dataset()\n # set the out_path to None; this (new) dataset is unsaved.\n self.out_path = None\n\n\n def _notify_user_that_data_is_unsaved(self):\n if self.out_path is None:\n self.dataset_file_lbl.setText(\"<font color='red'>careful! 
your data isn't saved yet</font>\")\n else:\n self.dataset_file_lbl.setText(\"open file: <font color='red'>%s</font>\" % self.out_path)\n\n def toggle_menu_options_that_require_dataset(self, enable):\n self.action_go.setEnabled(enable)\n self.action_cum_ma.setEnabled(enable)\n self.action_loo_ma.setEnabled(enable)\n self.action_meta_regression.setEnabled(enable)\n self._enable_action_subgroup_ma()\n \n def _enable_action_subgroup_ma(self):\n ''' Enables action_subgroup_ma if there are suitable covariate(s)\n i.e. of type Factor '''\n \n if any([cov.get_data_type() == meta_globals.FACTOR for cov in self.model.dataset.covariates]):\n self.action_subgroup_ma.setEnabled(True)\n else:\n self.action_subgroup_ma.setEnabled(False)\n \n def disable_menu_options_that_require_dataset(self):\n self.toggle_menu_options_that_require_dataset(False)\n\n def enable_menu_options_that_require_dataset(self):\n self.toggle_menu_options_that_require_dataset(True)\n\n def keyPressEvent(self, event):\n if (event.modifiers() & QtCore.Qt.ControlModifier):\n if event.key() == QtCore.Qt.Key_S:\n # ctrl + s = save\n print \"saving..\"\n self.save()\n elif event.key() == QtCore.Qt.Key_O:\n # ctrl + o = open\n self.open()\n elif event.key() == QtCore.Qt.Key_A:\n self.analysis()\n\n def _disconnections(self):\n ''' \n disconnects model-related signs/slots. this should be called prior to swapping\n in a new model, e.g., when a dataset is loaded, to tear down the relevant connections. \n _setup_connections (with menu_actiosn set to False) should subsequently be invoked. \n '''\n \n #QObject.disconnect(self.tableView.model(), SIGNAL(\"conf_level_changed()\"), self._change_conf_level_label)\n QObject.disconnect(self.tableView.model(), SIGNAL(\"pyCellContentChanged(PyQt_PyObject, PyQt_PyObject, PyQt_PyObject, PyQt_PyObject)\"),\n self.tableView.cell_content_changed)\n\n QObject.disconnect(self.tableView.model(), SIGNAL(\"outcomeChanged()\"),\n self.tableView.displayed_ma_changed)\n QObject.disconnect(self.tableView.model(), SIGNAL(\"followUpChanged()\"),\n self.tableView.displayed_ma_changed)\n \n QObject.disconnect(self.tableView, SIGNAL(\"dataDirtied()\"), self.data_dirtied)\n \n QObject.disconnect(self.tableView.model(), SIGNAL(\"modelAboutToBeReset()\"),\n self._model_about_to_be_reset)\n\n\n def data_error(self, msg):\n QMessageBox.warning(self.parent(), \"whoops\", msg)\n\n def set_edit_focus(self, index):\n ''' sets edit focus to the row,col specified by index.'''\n self.tableView.setCurrentIndex(index)\n self.tableView.edit(index)\n \n def populate_open_recent_menu(self):\n recent_datasets = get_setting('recent_files')\n recent_datasets.reverse() # most-recent first\n # qt designer inexplicably forcing the _2. not sure why; \n # gave up struggling with it. 
grr.\n self.action_open_recent_2.clear()\n for dataset in recent_datasets:\n action_item = QAction(QString(dataset), self.action_open_recent_2)\n self.action_open_recent_2.addAction(action_item)\n QObject.connect(action_item, SIGNAL(\"triggered()\"), self.dataset_selected) \n \n \n \n\n def dataset_selected(self):\n dataset_path = QObject.sender(self).text()\n self.open(file_path=dataset_path)\n \n def _change_global_ci(self):\n print(\"Changing global confidence level:\")\n prev_conf_level = self.model.get_global_conf_level()\n\n dialog = conf_level_dialog.ChangeConfLevelDlg(prev_conf_level, self)\n if dialog.exec_():\n new_conf_level = dialog.get_value()\n change_cl_command = Command_Change_Conf_Level(prev_conf_level, new_conf_level, mainform=self)\n self.tableView.undoStack.push(change_cl_command)\n \n def _import_csv(self):\n '''Import data from csv file'''\n wizard = main_wizard.MainWizard(parent=self, path=\"csv_import\")\n if wizard.exec_():\n wizard_data = wizard.get_results()\n self._handle_wizard_results(wizard_data)\n \n def _setup_connections(self, menu_actions=True):\n ''' Signals & slots '''\n QObject.connect(self.tableView.model(), SIGNAL(\"pyCellContentChanged(PyQt_PyObject, PyQt_PyObject, PyQt_PyObject, PyQt_PyObject)\"),\n self.tableView.cell_content_changed)\n\n QObject.connect(self.tableView.model(), SIGNAL(\"outcomeChanged()\"), self.tableView.displayed_ma_changed)\n QObject.connect(self.tableView.model(), SIGNAL(\"followUpChanged()\"), self.tableView.displayed_ma_changed)\n \n ###\n # this is not ideal, but I couldn't get the rowsInserted methods working. \n # basically, the modelReset (which is custom to this app; not a QT thing, per se)\n # is emitted when a model reset was called but the edit focus should be set back to \n # where it was before this reset() call (reset clears the current editor).\n # this index is the QModelIndex. this is used, e.g., when a new study is added.\n # this fixes bug #20.\n QObject.connect(self.tableView.model(), SIGNAL(\"modelReset(QModelIndex)\"),\n self.set_edit_focus) \n \n # Do actions when the model is about to be reset (for now, just\n # recalculate display scale values)\n QObject.connect(self.tableView.model(), SIGNAL(\"modelAboutToBeReset()\"), self._model_about_to_be_reset)\n \n ###\n # this listens to the model regarding errors in data entry -- \n # such data will be rejected (e.g., strings for counts, or whatever),\n # and this hook allows the model to pass along error messages to the\n # user. 
the data checking happens in ma_dataset (specifically, in the\n # setData method) \n QObject.connect(self.tableView.model(), SIGNAL(\"dataError(QString)\"), self.data_error)\n\n QObject.connect(self.tableView, SIGNAL(\"dataDirtied()\"), self.data_dirtied) \n if menu_actions: \n QObject.connect(self.nav_add_btn, SIGNAL(\"pressed()\"), self.add_new)\n QObject.connect(self.nav_right_btn, SIGNAL(\"pressed()\"), self.next)\n QObject.connect(self.nav_left_btn, SIGNAL(\"pressed()\"), self.previous)\n QObject.connect(self.nav_up_btn, SIGNAL(\"pressed()\"), self.next_dimension)\n QObject.connect(self.nav_down_btn, SIGNAL(\"pressed()\"), self.previous_dimension)\n \n QObject.connect(self.action_save, SIGNAL(\"triggered()\"), self.save)\n QObject.connect(self.action_save_as, SIGNAL(\"triggered()\"), self.save_as)\n QObject.connect(self.action_open, SIGNAL(\"triggered()\"), self.open)\n QObject.connect(self.action_new_dataset, SIGNAL(\"triggered()\"), self.create_new_dataset)\n QObject.connect(self.action_quit, SIGNAL(\"triggered()\"), self.quit)\n QObject.connect(self.action_go, SIGNAL(\"triggered()\"), self.go)\n QObject.connect(self.action_cum_ma, SIGNAL(\"triggered()\"), self.cum_ma)\n QObject.connect(self.action_loo_ma, SIGNAL(\"triggered()\"), self.loo_ma)\n \n QObject.connect(self.action_undo, SIGNAL(\"triggered()\"), self.undo)\n QObject.connect(self.action_redo, SIGNAL(\"triggered()\"), self.redo)\n QObject.connect(self.action_copy, SIGNAL(\"triggered()\"), self.tableView.copy)\n QObject.connect(self.action_paste, SIGNAL(\"triggered()\"), self.tableView.paste)\n \n QObject.connect(self.action_edit, SIGNAL(\"triggered()\"), self.edit_dataset)\n QObject.connect(self.action_view_network, SIGNAL(\"triggered()\"), self.view_network)\n QObject.connect(self.action_add_covariate, SIGNAL(\"triggered()\"), self.add_covariate)\n \n QObject.connect(self.action_meta_regression, SIGNAL(\"triggered()\"), self.meta_reg)\n QObject.connect(self.action_subgroup_ma, SIGNAL(\"triggered()\"), self.meta_subgroup_get_cov)\n\n QObject.connect(self.action_open_help, SIGNAL(\"triggered()\"), self.show_help)\n QObject.connect(self.action_change_conf_level, SIGNAL(\"triggered()\"), self._change_global_ci)\n QObject.connect(self.action_import_csv, SIGNAL(\"triggered()\"), self._import_csv)\n\n def _change_conf_level_label(self):\n conf_level = self.model.get_global_conf_level()\n self.cl_label.setText(\"confidence level: {:.1%}\".format(conf_level/100.0))\n\n def go(self):\n form = None\n if self.model.get_current_outcome_type() != \"diagnostic\":\n # in the binary and continuous case, we go straight \n # to selecting the metric/parameters here.\n #\n # note that the spec form gets *this* form as a parameter.\n # this allows the spec form to callback to this\n # module when specifications have been provided.\n form = ma_specs.MA_Specs(self.model, parent=self, conf_level=self.model.get_global_conf_level())\n else:\n # diagnostic data; we first have the user select metric(s),\n # and only then the model, &etc.\n form = diag_metrics.Diag_Metrics(self.model, parent=self)\n form.show()\n \n def meta_reg(self):\n form = meta_reg_form.MetaRegForm(self.model, parent=self)\n form.show()\n \n def data_dirtied(self):\n self._notify_user_that_data_is_unsaved()\n self.current_data_unsaved = True\n\n def meta_subgroup_get_cov(self):\n form = meta_subgroup_form.MetaSubgroupForm(self.model, parent=self)\n form.show()\n \n ####\n # Here are the calls to ma_specs with so-called `meta-methods`\n # which operate over the output of 
meta-analytic methods. Note\n # that we don't care what sort of data we're operating over here;\n # ma_specs takes care of that. The convention is that each meta\n # method, for example `cum.ma`, has .binary and .continuous \n # implementation.\n ### TODO pull out meta methods auto-magically via introspection.\n def cum_ma(self):\n # NOTE that we do not allow cumulative meta-analysis on\n # diagnostic data -- this method should never be invoked\n # if we're dealing with diag data.\n form = None\n # note that the spec form gets *this* form as a parameter.\n # this allows the spec form to callback to this\n # module when specifications have been provided.\n if self.model.get_current_outcome_type() != \"diagnostic\":\n form = ma_specs.MA_Specs(self.model, meta_f_str=\"cum.ma\", parent=self, conf_level=self.model.get_global_conf_level())\n else:\n # diagnostic data; we first have the user select metric(s),\n # and only then the model, &etc.\n '''\n @@ TODO this is not actually implemented! i.e., we do not have\n a cumulative diagnostic MA method. for now this method should\n *never* be called with diagnostic data.\n '''\n form = diag_metrics.Diag_Metrics(self.model, meta_f_str=\"cum.ma\", \\\n parent=self) \n\n form.show()\n \n def loo_ma(self):\n form = None\n if self.model.get_current_outcome_type() != \"diagnostic\":\n # in the binary and continuous case, we go straight \n # to selecting the metric/parameters here.\n #\n # note that the spec form gets *this* form as a parameter.\n # this allows the spec form to callback to this\n # module when specifications have been provided.\n form = ma_specs.MA_Specs(self.model, meta_f_str=\"loo.ma\", parent=self, conf_level=self.model.get_global_conf_level())\n else:\n # diagnostic data; we first have the user select metric(s),\n # and only then the model, &etc.\n form = diag_metrics.Diag_Metrics(self.model, meta_f_str=\"loo.ma\", \\\n parent=self)\n\n form.show()\n\n def show_help(self):\n webbrowser.open(meta_globals.HELP_URL)\n\n def meta_subgroup(self, selected_cov):\n form = None\n if self.model.get_current_outcome_type() != \"diagnostic\":\n # in the binary and continuous case, we go straight \n # to selecting the metric/parameters here.\n #\n # note that the spec form gets *this* form as a parameter.\n # this allows the spec form to callback to this\n # module when specifications have been provided.\n form = ma_specs.MA_Specs(self.model,\n meta_f_str=\"subgroup.ma\", \n parent=self, \n external_params={\"cov_name\":selected_cov},\n conf_level=self.model.get_global_conf_level())\n else:\n # diagnostic data; we first have the user select metric(s),\n # and only then the model, &etc.\n form = diag_metrics.Diag_Metrics(self.model, meta_f_str=\"subgroup.ma\", \\\n parent=self,\\\n external_params={\"cov_name\":selected_cov})\n\n form.show()\n \n def undo(self):\n self.tableView.undoStack.undo()\n \n def redo(self):\n self.tableView.undoStack.redo()\n \n def edit_dataset(self):\n cur_dataset = copy.deepcopy(self.model.dataset)\n edit_window = edit_dialog.EditDialog(cur_dataset, parent=self)\n \n if edit_window.exec_():\n # if we edited the current dataset when there was no\n # outcome yet, then we want to default to an outcome\n # that was added.\n\n ### get stateful dictionary here, update, pass to \n old_state_dict = self.tableView.model().get_stateful_dict()\n new_state_dict = copy.deepcopy(old_state_dict)\n\n # update the new state dict to reflect the currently selected\n # outcomes, etc.\n new_state_dict[\"current_outcome\"] = 
old_state_dict[\"current_outcome\"]\n\n if edit_window.outcome_list.model().current_outcome is not None:\n new_state_dict[\"current_outcome\"] = edit_window.outcome_list.model().current_outcome\n # fix for issue #130: if the current outcome no longer exists, pick a different one.\n elif new_state_dict[\"current_outcome\"] not in \\\n edit_window.outcome_list.model().outcome_list:\n # then just show a random outcome\n new_state_dict[\"current_outcome\"] = edit_window.outcome_list.model().outcome_list[0]\n\n new_state_dict[\"current_time_point\"] = max(edit_window.follow_up_list.currentIndex().row(), 0)\n grp_list = edit_window.group_list.model().group_list\n\n if len(grp_list) >= 2:\n new_state_dict[\"current_txs\"] = grp_list[:2]\n else:\n # new_state_dict[\"current_txs\"] = [\"tx A\", \"tx B\"]\n new_state_dict[\"current_txs\"] = meta_globals.DEFAULT_GROUP_NAMES\n modified_dataset = edit_window.dataset\n \n redo_f = lambda : self.set_model(modified_dataset, new_state_dict)\n original_dataset = copy.deepcopy(self.model.dataset)\n undo_f = lambda : self.set_model(original_dataset, old_state_dict) \n edit_command = meta_globals.CommandGenericDo(redo_f, undo_f)\n self.tableView.undoStack.push(edit_command)\n \n \n def populate_metrics_menu(self, metric_to_check=None):\n '''\n Populates the `metric` sub-menu with available metrics for the\n current datatype.\n '''\n \n self.menuMetric.clear()\n self.menuMetric.setDisabled(False)\n\n if self.model.get_current_outcome_type()==\"binary\":\n self.add_binary_metrics(metric_to_check=metric_to_check)\n self.metric_menu_is_set_for = meta_globals.BINARY\n\n elif self.model.get_current_outcome_type()==\"continuous\":\n self.add_continuous_metrics(metric_to_check=metric_to_check)\n self.metric_menu_is_set_for = meta_globals.CONTINUOUS\n \n else:\n # diagnostic data; deactive metrics option\n # we always show sens. + spec. for diag. 
data.\n self.menuMetric.setDisabled(True)\n self.metric_menu_is_set_for = meta_globals.DIAGNOSTIC\n\n \n def add_binary_metrics(self, metric_to_check=None):\n self.add_metrics(meta_globals.BINARY_ONE_ARM_METRICS,\\\n meta_globals.BINARY_TWO_ARM_METRICS,\n metric_to_check=metric_to_check)\n \n def add_continuous_metrics(self, metric_to_check=None):\n self.add_metrics(meta_globals.CONTINUOUS_ONE_ARM_METRICS,\\\n meta_globals.CONTINUOUS_TWO_ARM_METRICS,\n metric_to_check=metric_to_check)\n \n def add_metrics(self, one_arm_metrics, two_arm_metrics, \\\n metric_to_check=None):\n # we'll add sub-menus for two-arm and one-arm metrics\n self.twoArmMetricMenu = self.add_sub_metric_menu(\"two-arm\")\n self.oneArmMetricMenu = self.add_sub_metric_menu(\"one-arm\")\n\n for i,metric in enumerate(two_arm_metrics):\n metric_action = self.add_metric_action(metric, self.twoArmMetricMenu)\n if metric == metric_to_check or (metric_to_check is None and i == 0):\n # arbitrarily check the first metric in the case that none\n # is specificied \n metric_action.setChecked(True)\n \n # now add the one-arm metrics\n for metric in one_arm_metrics:\n metric_action = self.add_metric_action(metric, self.oneArmMetricMenu) \n if metric == metric_to_check:\n metric_action.setChecked(True)\n \n\n def add_sub_metric_menu(self, name):\n sub_menu = QtGui.QMenu(QString(name), self.menuMetric)\n self.menuMetric.addAction(sub_menu.menuAction())\n return sub_menu\n \n def add_metric_action(self, metric, menu):\n metric_names = meta_globals.ALL_METRIC_NAMES\n \n metric_action = QAction(QString(metric+\": \"+metric_names[metric]), self)\n try:\n if str(metric) in metric_names:\n metric_action.setToolTip(metric_names[metric]) # doesn't do anything in OSX?\n metric_action.setStatusTip(metric_names[metric])\n metric_action.setData(QVariant(metric)) # store code for metric in here\n except:\n print(\"Could not set metric name tooltip\")\n metric_action.setCheckable(True)\n QObject.connect(metric_action,\n SIGNAL(\"toggled(bool)\"),\n lambda: self.metric_selected(metric, menu)\n )\n menu.addAction(metric_action) \n return metric_action\n \n \n def deselect_all_metrics(self):\n # de-selects all metrics\n # it doesn't appear that there is a more\n # straight forward way of doing this, \n # unfortunately.\n data_type = self.tableView.model().get_current_outcome_type(get_str=False)\n if data_type in (meta_globals.BINARY, meta_globals.CONTINUOUS):\n # then there are sub-menus (one-group, two-group)\n for sub_menu in self.menuMetric.actions():\n sub_menu = sub_menu.menu()\n for action in sub_menu.actions():\n action.blockSignals(True)\n action.setChecked(False)\n action.blockSignals(False)\n\n def metric_selected(self, metric_name, menu):\n # first deselect the previous metric\n self.deselect_all_metrics()\n \n # now select the newly chosen one.\n prev_metric_name = self.tableView.model().current_effect\n for action in menu.actions():\n #action_text = action.text()\n action_data = action.data().toString()\n #if action_text == metric_name:\n if action_data == metric_name:\n action.blockSignals(True)\n action.setChecked(True)\n action.blockSignals(False)\n \n self.tableView.model().set_current_metric(metric_name)\n self.model.try_to_update_outcomes() \n self.model.reset()\n self.tableView.resizeColumnsToContents()\n \n def view_network(self):\n view_window = network_view.ViewDialog(self.model, parent=self)\n view_window.show()\n \n def analysis(self, results):\n if results is None:\n return # analysis failed\n else: # analysis succeeded\n form = 
results_window.ResultsWindow(results, parent=self)\n form.show()\n\n\n def edit_group_name(self, cur_group_name):\n orig_group_name = copy.copy(cur_group_name)\n edit_group_form = edit_group_name_form.EditGroupName(cur_group_name, parent=self)\n if edit_group_form.exec_():\n new_group_name = unicode(edit_group_form.group_name_le.text().toUtf8(), \"utf-8\")\n \n # make sure the group name doesn't already exist\n if new_group_name in self.model.dataset.get_group_names():\n QMessageBox.warning(self,\n \"whoops.\",\n \"%s is already a group name -- pick something else, please\" % new_group_name)\n \n else:\n redo_f = lambda: self.model.rename_group(orig_group_name, new_group_name)\n undo_f = lambda: self.model.rename_group(new_group_name, orig_group_name)\n\n rename_group_command = meta_globals.CommandGenericDo(redo_f, undo_f)\n self.tableView.undoStack.push(rename_group_command)\n \n def add_covariate(self):\n form = add_new_dialogs.AddNewCovariateForm(self)\n form.covariate_name_le.setFocus()\n if form.exec_():\n # then the user clicked 'ok'.\n new_covariate_name = unicode(form.covariate_name_le.text().toUtf8(), \"utf-8\")\n\n # fix for issue #59; do not allow the user to create two covariates with\n # the same name!\n new_covariate_type = str(form.datatype_cbo_box.currentText()).lower()\n if new_covariate_name in self.model.get_covariate_names():\n QMessageBox.warning(self,\n \"whoops.\",\n \"you've already entered a covariate with the name %s; please pick another name.\" % \\\n new_covariate_name)\n else:\n redo_f = lambda: self._add_new_covariate(new_covariate_name, new_covariate_type)\n undo_f = lambda: self._undo_add_new_covariate(new_covariate_name)\n \n add_cov_command = meta_globals.CommandGenericDo(redo_f, undo_f)\n self.tableView.undoStack.push(add_cov_command)\n \n\n def _add_new_covariate(self, cov_name, cov_type):\n self.model.add_covariate(cov_name, cov_type)\n print \"new covariate name: %s with type %s\" % (cov_name, cov_type)\n self.tableView.resizeColumnsToContents()\n self.action_meta_regression.setEnabled(True)\n \n def _undo_add_new_covariate(self, cov_name):\n self.model.remove_covariate(cov_name)\n self.tableView.resizeColumnsToContents()\n if len(self.model.covariates) == 0:\n self.action_meta_regression.setEnabled(False)\n \n def add_new(self, startup_outcome = None):\n redo_f, undo_f = None, None\n if self.cur_dimension == \"outcome\" and not startup_outcome:\n form = add_new_dialogs.AddNewOutcomeForm(parent=self, is_diag=self.model.is_diag())\n form.outcome_name_le.setFocus()\n if form.exec_():\n # then the user clicked ok and has added a new outcome.\n # here we want to add the outcome to the dataset, and then\n # display it\n new_outcome_name = unicode(form.outcome_name_le.text().toUtf8(), \"utf-8\")\n # the outcome type is one of the enumerated types; we don't worry about\n # unicode encoding\n new_outcome_type = str(form.datatype_cbo_box.currentText())\n redo_f = lambda: self._add_new_outcome(new_outcome_name, new_outcome_type)\n prev_outcome = str(self.model.current_outcome)\n undo_f = lambda: self._undo_add_new_outcome(new_outcome_name, prev_outcome)\n elif self.cur_dimension == \"outcome\" and startup_outcome: # For dealing with outcomes from the startup form\n new_outcome_name = unicode(startup_outcome['name'].toUtf8(), \"utf-8\")\n new_outcome_type = str(startup_outcome['data_type'])\n try:\n new_outcome_subtype = startup_outcome['sub_type']\n except:\n print(\"ERROR: No outcome subtype detected.\")\n #pyqtRemoveInputHook()\n #pdb.set_trace()\n print 
'Startup Outcome',startup_outcome\n redo_f = lambda: self._add_new_outcome(new_outcome_name, new_outcome_type, new_outcome_subtype)\n prev_outcome = str(self.model.current_outcome)\n undo_f = lambda: self._undo_add_new_outcome(new_outcome_name, prev_outcome)\n elif self.cur_dimension == \"group\":\n form = add_new_dialogs.AddNewGroupForm(self)\n form.group_name_le.setFocus() \n if form.exec_():\n new_group_name = unicode(form.group_name_le.text().toUtf8(), \"utf-8\")\n cur_groups = list(self.model.get_current_groups())\n redo_f = lambda: self._add_new_group(new_group_name)\n undo_f = lambda: self._undo_add_new_group(new_group_name, cur_groups)\n else:\n # then the dimension is follow-up\n form = add_new_dialogs.AddNewFollowUpForm(self)\n form.follow_up_name_le.setFocus()\n if form.exec_():\n follow_up_lbl = unicode(form.follow_up_name_le.text().toUtf8(), \"utf-8\")\n redo_f = lambda: self._add_new_follow_up_for_cur_outcome(follow_up_lbl)\n previous_follow_up = self.model.get_current_follow_up_name()\n undo_f = lambda: self._undo_add_follow_up_for_cur_outcome(\\\n previous_follow_up, follow_up_lbl)\n\n if redo_f is not None:\n next_command = meta_globals.CommandGenericDo(redo_f, undo_f)\n self.tableView.undoStack.push(next_command)\n \n def _add_new_group(self, new_group_name):\n self.model.add_new_group(new_group_name)\n print \"\\nok. added new group: %s\" % new_group_name\n cur_groups = list(self.model.get_current_groups())\n cur_groups[1] = new_group_name\n self.model.set_current_groups(cur_groups)\n # @TODO probably need to tell the table model we changed \n # the group being displayed...\n self.display_groups(cur_groups)\n \n def _undo_add_new_group(self, added_group, previously_displayed_groups):\n self.model.remove_group(added_group)\n print \"\\nremoved group %s\" % added_group\n print \"attempting to display groups: %s\" % previously_displayed_groups\n self.model.set_current_groups(previously_displayed_groups)\n self.display_groups(previously_displayed_groups)\n \n def _undo_add_new_outcome(self, added_outcome, previously_displayed_outcome):\n print \"removing added outcome: %s\" % added_outcome\n self.model.remove_outcome(added_outcome)\n print \"trying to display: %s\" % previously_displayed_outcome\n ##\n # RESOLVED previously, if previous outcome was None, this threw up\n # (see Issue 4: http://github.com/bwallace/OpenMeta-analyst-/issues#issue/4)\n self.display_outcome(previously_displayed_outcome)\n \n def _add_new_outcome(self, outcome_name, outcome_type, sub_type=None):\n self.model.add_new_outcome(outcome_name, outcome_type, sub_type=sub_type)\n self.display_outcome(outcome_name)\n \n def _add_new_follow_up_for_cur_outcome(self, follow_up_lbl):\n self.model.add_follow_up_to_current_outcome(follow_up_lbl)\n self.display_follow_up(self.model.get_t_point_for_follow_up_name(follow_up_lbl))\n \n def _undo_add_follow_up_for_cur_outcome(self, prev_follow_up, follow_up_to_del):\n self.model.remove_follow_up_from_outcome(follow_up_to_del, \\\n str(self.model.current_outcome))\n self.display_follow_up(self.model.get_t_point_for_follow_up_name(prev_follow_up))\n \n def next(self):\n # probably you should disable next for the current dimension\n # if there is only one point (e.g., outcome). otherwise you end\n # up enqueueing a bunch of pointless undo/redos.\n redo_f, undo_f = None, None\n if self.cur_dimension == \"outcome\":\n old_outcome = self.model.current_outcome\n ## \n # note that we have to cache the currently displayed\n # groups, as well. 
these groups may or may not be available\n # on the next outcome; the next_outcome call may therefore\n # default to displaying some other group(s). however, this\n # would cause problems when the 'next' action is undone, as in\n # such a case the previous (current) outcome will be displayed,\n # but the groups being displayed may be other than what they \n # should be (i.e., than what they are currently)\n previous_groups = self.model.get_current_groups()\n next_outcome = self.model.get_next_outcome_name()\n redo_f = lambda: self.display_outcome(next_outcome)\n previous_follow_up = self.model.get_current_follow_up_name()\n undo_f = lambda: self.display_outcome(old_outcome, \\\n follow_up_name=previous_follow_up, group_names=previous_groups)\n elif self.cur_dimension == \"group\":\n previous_groups = self.model.get_current_groups()\n new_groups = self.model.next_groups()\n redo_f = lambda: self.display_groups(new_groups)\n undo_f = lambda: self.display_groups(previous_groups)\n elif self.cur_dimension == \"follow-up\":\n old_follow_up_t_point = self.model.current_time_point\n next_follow_up_t_point = self.model.get_next_follow_up()[0]\n redo_f = lambda: self.display_follow_up(next_follow_up_t_point) \n undo_f = lambda: self.display_follow_up(old_follow_up_t_point)\n \n if redo_f is not None and undo_f is not None:\n next_command = meta_globals.CommandGenericDo(redo_f, undo_f)\n self.tableView.undoStack.push(next_command)\n \n def previous(self):\n redo_f, undo_f = None, None\n if self.cur_dimension == \"outcome\":\n old_outcome = self.model.current_outcome\n next_outcome = self.model.get_prev_outcome_name()\n redo_f = lambda: self.display_outcome(next_outcome)\n undo_f = lambda: self.display_outcome(old_outcome)\n elif self.cur_dimension == \"group\":\n cur_groups = self.model.get_current_groups()\n prev_groups = self.model.get_previous_groups()\n redo_f = lambda: self.display_groups(prev_groups)\n undo_f = lambda: self.display_groups(cur_groups)\n elif self.cur_dimension == \"follow-up\":\n old_follow_up_t_point = self.model.current_time_point\n previous_follow_up_t_point = self.model.get_previous_follow_up()[0]\n redo_f = lambda: self.display_follow_up(previous_follow_up_t_point) \n undo_f = lambda: self.display_follow_up(old_follow_up_t_point)\n \n if redo_f is not None and undo_f is not None:\n prev_command = meta_globals.CommandGenericDo(redo_f, undo_f)\n self.tableView.undoStack.push(prev_command)\n\n def next_dimension(self):\n '''\n In keeping with the dimensions metaphor, wherein the various\n components that can comprise a dataset are 'dimensions' (e.g.,\n outcomes), this function iterates over the dimensions. 
So if you call\n this method, then 'next()', the next method will step forward in the\n dimension made active here.\n '''\n if self.cur_dimension_index == len(self.dimensions)-1:\n self.cur_dimension_index = 0\n else:\n self.cur_dimension_index+=1\n self.update_dimension()\n\n def previous_dimension(self):\n if self.cur_dimension_index == 0:\n self.cur_dimension_index = len(self.dimensions)-1\n else:\n self.cur_dimension_index-=1\n self.update_dimension()\n\n def update_dimension(self):\n self.cur_dimension = self.dimensions[self.cur_dimension_index]\n self.nav_lbl.setText(self.cur_dimension)\n\n def display_groups(self, groups):\n print \"displaying groups: %s\" % groups\n self.model.set_current_groups(groups)\n self.model.try_to_update_outcomes()\n self.model.reset()\n self.tableView.resizeColumnsToContents()\n \n def display_outcome(self, outcome_name, group_names=None, follow_up_name=None):\n print \"displaying outcome: %s\" % outcome_name\n ###\n # We need to update which groups & follow-ups are current\n # in order to avoid attempting to display a group/fu that\n # do not belong to the outcome_name. \n self.model.set_current_outcome(outcome_name)\n self.populate_metrics_menu()\n \n # first ascertain if the currently displayed follow up is\n # available for this outcome\n if follow_up_name is not None:\n self.model.set_current_follow_up(follow_up_name)\n else:\n # If a follow up isn't explicitly passed in, attempt to use\n # the current follow up. If this does not exist for the outcome\n # to be displayed, then display a different follow up.\n cur_follow_up = self.model.get_current_follow_up_name()\n if not self.model.outcome_has_follow_up(outcome_name, cur_follow_up):\n # then the outcome does not have this follow up and we have to \n # step on to the next one.\n next_follow_up = self.model.get_next_follow_up()[1]\n self.model.set_current_follow_up(next_follow_up)\n \n # now we check the groups.\n if group_names is not None:\n self.model.set_current_groups(group_names)\n else:\n # then no group names were explicitly passed in; ascertain\n # that the outcome/fu contains the current groups; if not,\n # set them to something else.\n cur_groups = self.model.get_current_groups()\n if not all([self.model.outcome_fu_has_group(\\\n outcome_name, self.model.get_current_follow_up_name(), group) for group in cur_groups]):\n self.model.set_current_groups(self.model.next_groups())\n \n self.cur_outcome_lbl.setText(u\"<font color='Blue'>%s</font>\" % outcome_name)\n self.cur_time_lbl.setText(u\"<font color='Blue'>%s</font>\" % self.model.get_current_follow_up_name())\n self.model.reset()\n self.tableView.resizeColumnsToContents()\n\n def display_follow_up(self, time_point):\n print \"follow up\"\n self.model.current_time_point = time_point\n self.update_follow_up_label()\n self.model.reset()\n self.tableView.resizeColumnsToContents()\n \n def update_follow_up_label(self):\n self.cur_time_lbl.setText(u\"<font color='Blue'>%s</font>\" % self.model.get_current_follow_up_name())\n \n def open(self, file_path=None):\n '''\n This gets called when the user opts to open an existing dataset. Note that we make use\n of the pickled dataset itself (.oma) and we also look for a corresponding `state`\n dictionary, which contains things like which outcome was currently displayed, etc.\n Also note that, as in Excel, the open operation is undoable. 
\n '''\n \n if self.current_data_unsaved:\n choice = self.prompt_to_save_unsaved_data()\n if choice == QMessageBox.Yes:\n self.save()\n elif choice == QMessageBox.No:\n pass\n else: # cancel\n return\n\n # if no file path is provided, prompt the user.\n if file_path is None:\n file_path = QFileDialog.getOpenFileName(\n parent=self,\n caption=QString(\"OpenMeta[analyst] - Open File\"),\n directory=\".\",\n filter=\"open meta files (*.oma)\")\n file_path = unicode(file_path.toUtf8(),'utf8')\n\n # if the user didn't select anything, we return false. \n if file_path == \"\":\n return False \n\n add_file_to_recent_files(file_path)\n \n data_model = None\n print \"loading %s...\" % file_path\n try:\n data_model = pickle.load(open(file_path, 'r'))\n print \"successfully loaded data\"\n except Exception as e:\n msg = \"Could not open %s, error: %s\" % (file_path, str(e))\n print(msg)\n QMessageBox.critical(self, \"whoops\", msg)\n return None\n \n ## cache current state for undo.\n prev_out_path = copy.copy(self.out_path)\n prev_state_dict = copy.copy(self.model.get_stateful_dict())\n \n self.out_path = file_path\n \n state_dict = None\n try:\n state_dict = pickle.load(open(file_path + \".state\"))\n print \"found state dictionary: \\n%s\" % state_dict\n except:\n print \"no state dictionary found -- using 'reasonable' defaults\"\n state_dict = self.tableView.model().make_reasonable_stateful_dict(data_model)\n print \"made state dictionary: \\n%s\" % state_dict\n\n prev_dataset = self.model.dataset.copy()\n \n undo_f = lambda: self.undo_set_model(prev_out_path, prev_state_dict,\n prev_dataset)\n redo_f = lambda: self.set_model(data_model, state_dict,\n check_for_appropriate_metric=True)\n \n open_command = meta_globals.CommandGenericDo(redo_f, undo_f)\n self.tableView.undoStack.push(open_command)\n self.dataset_file_lbl.setText(\"open file: %s\" % file_path)\n \n # we just opened it, so it's 'saved'\n self.current_data_unsaved = False\n\n return True\n\n \n def delete_study(self, study, study_index=None):\n undo_f = lambda : self._add_study(study, study_index=study_index)\n redo_f = lambda : self._remove_study(study)\n delete_command = meta_globals.CommandGenericDo(redo_f, undo_f)\n self.tableView.undoStack.push(delete_command)\n \n def change_cov_type(self, covariate):\n cur_dataset = copy.deepcopy(self.model.dataset)\n # keep the current study order, because we're going to sort the studies\n # on the change_cov_form but we want to revert to the ordering\n # they came in with when we're done.\n original_study_order = [study.name for study in self.model.dataset.studies]\n\n change_type_form = \\\n change_cov_type_form.ChangeCovTypeForm(cur_dataset, covariate, parent=self)\n \n if change_type_form.exec_():\n modified_dataset = change_type_form.dataset\n # revert to original study ordering\n modified_dataset.studies.sort(\\\n cmp=modified_dataset.cmp_studies(compare_by=\"ordered_list\",\\\n ordered_list=original_study_order,\n mult=self.model.get_mult()))\n \n ### use the same state dict as before.\n old_state_dict = self.tableView.model().get_stateful_dict()\n new_state_dict = copy.deepcopy(old_state_dict)\n\n redo_f = lambda : self.set_model(modified_dataset, new_state_dict)\n original_dataset = copy.deepcopy(self.model.dataset)\n undo_f = lambda : self.set_model(original_dataset, old_state_dict) \n edit_command = meta_globals.CommandGenericDo(redo_f, undo_f)\n self.tableView.undoStack.push(edit_command)\n \n\n def rename_covariate(self, covariate):\n orig_cov_name = copy.copy(covariate.name)\n # 
TODO need to rename edit_group_name_form to something more general...\n edit_cov_form = edit_group_name_form.EditCovariateName(orig_cov_name, parent=self)\n if edit_cov_form.exec_():\n # the field names are also poorly named, in this case. here we mean the \n # **covariate name**, of course.\n new_cov = unicode(edit_cov_form.group_name_le.text().toUtf8(), \"utf-8\")\n \n # make sure the group name doesn't already exist\n if new_cov in self.model.dataset.get_cov_names():\n QMessageBox.warning(self,\n \"whoops.\",\n \"%s is already a covariate name -- pick something else, please\" % new_cov)\n \n else:\n ###\n # TODO implement rename_covariate!\n redo_f = lambda: self.model.rename_covariate(orig_cov_name, new_cov)\n undo_f = lambda: self.model.rename_covariate(new_cov, orig_cov_name)\n\n rename_cov_command = meta_globals.CommandGenericDo(redo_f, undo_f)\n self.tableView.undoStack.push(rename_cov_command)\n\n\n def delete_covariate(self, covariate):\n cov_vals_d = self.model.dataset.get_values_for_cov(covariate.name)\n undo_f = lambda : \\\n self.model.add_covariate(covariate.name, \\\n meta_globals.COV_INTS_TO_STRS[covariate.data_type], \\\n cov_values=cov_vals_d)\n redo_f = lambda : self.model.remove_covariate(covariate)\n delete_command = meta_globals.CommandGenericDo(redo_f, undo_f)\n self.tableView.undoStack.push(delete_command) \n\n def _add_study(self, study, study_index=None):\n print \"adding study: %s\" % study.name\n self.model.dataset.add_study(study, study_index=study_index)\n self.model.reset()\n self.data_dirtied()\n \n def _remove_study(self, study):\n print \"deleting study: %s\" % study.name\n self.model.dataset.studies.remove(study)\n self.model.reset()\n self.data_dirtied()\n\n def set_model(self, data_model, state_dict=None, check_for_appropriate_metric=False):\n \n ##\n # we explicitly append a blank study to the\n # dataset iff there is fewer than 1 study \n # in the dataset. in this case, the only \n # row is essentially a blank study. \n add_blank_study = len(data_model) < 1\n self.model = ma_data_table_model.DatasetModel(dataset=data_model, \n add_blank_study=add_blank_study)\n\n self._disconnections()\n if len(data_model) >= 2:\n self.enable_menu_options_that_require_dataset()\n else:\n self.disable_menu_options_that_require_dataset()\n \n # covariates?\n if len(data_model.covariates) > 0:\n self.action_meta_regression.setEnabled(True)\n else:\n self.action_meta_regression.setEnabled(False)\n\n self.tableView.setModel(self.model)\n\n ## moving the statefulendess \n # update below the model swap-out\n # to fix issue #62\n if state_dict is not None:\n self.model.set_state(state_dict)\n\n\n print(\"calling update col indices from meta form set_model()\")\n self.tableView.model().update_column_indices()\n self.tableView.resizeColumnsToContents()\n \n if check_for_appropriate_metric:\n self.tableView.change_metric_if_appropriate()\n\n# if self.model.get_current_outcome_type() == \"diagnostic\":\n# # no cumulative MA for diagnostic data\n# self.action_cum_ma.setEnabled(False)\n# else:\n# self.action_cum_ma.setEnabled(True)\n\n self.model_updated()\n self.data_dirtied()\n print \"ok -- model set.\"\n \n \n def model_updated(self):\n ''' Call me when the model is changed. '''\n self.model.update_current_group_names()\n self.model.update_current_outcome()\n self.model.update_current_time_points()\n\n if self.model.current_outcome is not None:\n self.model.try_to_update_outcomes()\n \n # This is kind of subtle. 
We have to reconnect\n # our signals and slots when the underlying model \n # changes, because otherwise the antiquated/replaced\n # model (which was connected to the slots of interest)\n # remains, which is useless. However, we do not\n # reconnect the menu_action options; this will cause those\n # methods to be called x times! (x being the number of times\n # _setup_connections is invoked)\n self._setup_connections(menu_actions=False)\n self.tableView.resizeColumnsToContents()\n self.update_outcome_lbl()\n self.update_follow_up_label()\n \n ####\n # adding check to ascertain that the menu\n # isn't already ready for the current kind of data\n cur_data_type = self.tableView.model().get_current_outcome_type(get_str=False)\n if self.metric_menu_is_set_for != cur_data_type:\n self.populate_metrics_menu(\\\n metric_to_check=self.tableView.model().current_effect)\n\n self.model.reset()\n self._change_conf_level_label()\n \n \n def undo_set_model(self, out_path, state_dict, dataset):\n self.model = ma_data_table_model.DatasetModel(dataset)\n self.model.set_state(state_dict)\n self.out_path = out_path\n self._disconnections()\n self.tableView.setModel(self.model)\n self.model_updated()\n self.dataset_file_lbl.setText(\"open file: %s\" % self.out_path)\n\n \n def update_outcome_lbl(self):\n self.cur_outcome_lbl.setText(\\\n u\"<font color='Blue'>%s</font>\" % self.model.current_outcome)\n \n def quit(self):\n if self.current_data_unsaved:\n choice = self.prompt_to_save_unsaved_data()\n if choice == QMessageBox.Yes:\n self.save()\n elif choice == QMessageBox.No:\n pass\n else: # Cancel\n return \n \n save_settings()\n QApplication.quit()\n \n def prompt_to_save_unsaved_data(self):\n choice = QMessageBox.warning(self,\n \"Warning\",\n \"you've made unsaved changes to your data. Do you want to save your changes?\",\n QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel)\n return choice\n\n def save_as(self):\n return self.save(save_as=True)\n\n def save(self, save_as=False):\n \n docs_path = get_user_documents_path()\n if self.out_path is None or save_as:\n # use current out_path otherwise base it on the current dataset name\n if self.out_path:\n out_f = unicode(self.out_path)\n else:\n out_f = os.path.join(docs_path, self.model.get_name())\n\n out_f = QFileDialog.getSaveFileName(\n parent=self,\n caption=\"OpenMeta[analyst] - Save File\",\n directory=out_f,\n filter=\"open meta files: (.oma)\",\n )\n out_f = unicode(out_f.toUtf8(),'utf8')\n if out_f == \"\" or out_f == None:\n return None\n else:\n self.out_path = out_f\n \n # add proper file extension\n try:\n if self.out_path[-4:] != u\".oma\":\n self.out_path += u\".oma\"\n print(\"added proper file extension\")\n except Exception as e:\n print(\"\")\n print(e)\n \n \n try:\n print \"trying to write data out to: %s\" % self.out_path\n f = open(self.out_path, 'wb')\n pickle.dump(self.model.dataset, f)\n f.close()\n # also write out the 'state', which contains things\n # pertaining to the view\n d = self.model.get_stateful_dict()\n f = open(self.out_path + \".state\", 'wb')\n pickle.dump(d, f)\n f.close()\n\n # add dataset to recent files\n add_file_to_recent_files(self.out_path)\n \n self.dataset_file_lbl.setText(\"open file: %s\" % self.out_path)\n self.current_data_unsaved = False\n except Exception, e:\n # @TODO handle this elegantly?\n print e\n raise Exception, \"whoops. 
exception thrown attempting to save.\"\n\n\n \n def _show_tom(self):\n tom_dlg = easter_egg.TomDialog(parent=self)\n tom_dlg.exec_()\n\n def _make_new_dataset_and_setup_spreadsheet(self,dataset_info):\n is_diag = dataset_info['data_type'] == \"diagnostic\"\n self.new_dataset(is_diag=is_diag)\n \n tmp = self.cur_dimension\n self.cur_dimension = \"outcome\"\n self.add_new(dataset_info) # add the outcome\n self.cur_dimension = tmp\n \n if dataset_info['data_type'] in [\"binary\", \"continuous\"]:\n self.model.current_effect = dataset_info['effect'] # set current effect\n self.populate_metrics_menu(metric_to_check=self.model.current_effect)\n self.model.try_to_update_outcomes()\n self.model.reset()\n \n def _handle_wizard_results(self, wizard_data):\n path = wizard_data['path'] # route through wizard\n \n dataset_info = wizard_data['outcome_info']\n \n if path == \"open\":\n self.open(file_path=wizard_data['selected_dataset'])\n elif path == \"new_dataset\":\n self._make_new_dataset_and_setup_spreadsheet(dataset_info)\n\n elif path == \"csv_import\":\n csv_data = wizard_data['csv_data']\n \n # Back-up original dataset\n original_dataset = copy.deepcopy(self.model.dataset)\n old_state_dict = self.tableView.model().get_stateful_dict()\n \n self._make_new_dataset_and_setup_spreadsheet(dataset_info)\n \n new_dataset = copy.deepcopy(self.model.dataset)\n new_state_dict = self.tableView.model().get_stateful_dict()\n \n imported_data = csv_data['data']\n # Note: may want at some point to access the headers provided in the CSV;\n # these are accessible at csv_data['headers'] and\n # csv_data['expected_headers']\n covariate_names = csv_data['covariate_names']\n covariate_types = csv_data['covariate_types']\n \n print(\"Data to import: %s\\ncovariate names: %s\\ncovariate_types: %s\" % (str(imported_data),str(covariate_names),str(covariate_types) ))\n \n #Undo/redo stuff\n importcsv_command = CommandImportCSV(\n original_dataset=original_dataset,\n old_state_dict=old_state_dict,\n new_dataset=new_dataset,\n new_state_dict=new_state_dict,\n imported_data=imported_data,\n main_form=self,\n covariate_names=covariate_names,\n covariate_types=covariate_types)\n self.tableView.undoStack.push(importcsv_command)\n \n\n######################### Undo Command for Import CSV #########################\nclass CommandImportCSV(QUndoCommand):\n def __init__(self,\n original_dataset=None, old_state_dict=None,\n new_dataset=None, new_state_dict=None,\n main_form=None,\n imported_data=None,\n covariate_names=None, covariate_types=None,\n description=\"Import a CSV file\"):\n super(CommandImportCSV, self).__init__(description)\n self.imported_data = imported_data\n self.covariate_names = covariate_names\n self.covariate_types = covariate_types\n self.main_form = main_form\n \n # Undo / redo stuff\n self.original_dataset = original_dataset\n self.old_state_dict = old_state_dict\n self.new_dataset = new_dataset\n self.new_state_dict = new_state_dict\n \n self.new_dataset_has_imported_data = False\n \n def redo(self):\n if self.new_dataset_has_imported_data: #already imported once before, this is a real 'redo'\n self.main_form.set_model(self.new_dataset, self.new_state_dict)\n else: # this a first run\n self._import_data_into_new_dataset()\n self.new_dataset = copy.deepcopy(self.main_form.model.dataset)\n self.new_state_dict = self.main_form.tableView.model().get_stateful_dict()\n self.new_dataset_has_imported_data = True\n \n def undo(self):\n self.main_form.set_model(self.original_dataset, self.old_state_dict)\n 
self.main_form.model.reset()\n QApplication.processEvents()\n \n def _import_data_into_new_dataset(self):\n self.main_form.set_model(self.new_dataset, self.new_state_dict)\n \n # Set data in model:\n num_rows = len(self.imported_data)\n num_cols = len(self.imported_data[0])\n \n # Handle covariates\n if self.covariate_names != []:\n for name, cov_type in zip(self.covariate_names, self.covariate_types):\n self.main_form._add_new_covariate(name, cov_type)\n\n # Copy data into table\n progress_bar = ImportProgress(self.main_form, 0, num_rows*num_cols-1)\n \n \n \n progress_bar.setValue(0)\n progress_bar.show()\n for row in range(num_rows):\n for col in range(num_cols):\n progress_bar.setValue(row*num_cols + col)\n QApplication.processEvents()\n print(\"bar_ value: %s\" % str([progress_bar.value(),progress_bar.minimum(), progress_bar.maximum()]))\n value = QVariant(QString(self.imported_data[row][col]))\n self.main_form.model.setData(self.main_form.model.index(row,col+1), value, import_csv=True)\n \n progress_bar.hide() # we are done\n####################### END Undo Command for Import CSV #######################\n \nclass CommandNext(QUndoCommand):\n '''\n This is an undo command for user navigation\n '''\n def __init__(self, redo_f, undo_f, description=\"command:: next dimension\"):\n super(CommandNext, self).__init__(description)\n self.redo_f = redo_f\n self.undo_f = undo_f\n \n def redo(self):\n self.redo_f()\n \n def undo(self):\n self.undo_f()\n \n\nclass Command_Change_Conf_Level(QUndoCommand):\n ''' Undo command for chnaging the confidence level '''\n def __init__(self, old_conf_lvl, new_conf_lvl, mainform, description=\"Change confidence level\"):\n super(Command_Change_Conf_Level, self).__init__(description)\n \n \n self.old_cl = old_conf_lvl\n self.new_cl = new_conf_lvl\n self.mainform = mainform\n \n \n def redo(self):\n self._set_conf_level(self.new_cl)\n \n def undo(self):\n self._set_conf_level(self.old_cl)\n \n def _set_conf_level(self, conf_level):\n self.mainform.model.set_conf_level(conf_level)\n self.mainform.cl_label.setText(\"confidence level: {:.1%}\".format(conf_level/100.0))\n self.mainform.model.reset()\n print(\"Global Confidence level is now: %f\" % self.mainform.model.get_global_conf_level())\n \n\n" }, { "alpha_fraction": 0.7177067995071411, "alphanum_fraction": 0.7322205901145935, "avg_line_length": 35.24324417114258, "blob_id": "10bf453131f9a79302d19291d7d8595a21000f89", "content_id": "32db1dfe3bd0dd69423e4172fb040b0870d9b51d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1378, "license_type": "no_license", "max_line_length": 430, "num_lines": 37, "path": "/src/R/HSROC/man/In.house.Rd", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "\\name{In.house}\r\n\\alias{In.house}\r\n\\docType{data}\r\n\\title{ IN-HOUSE NUCLEIC ACID AMPLIFICATION TESTS (INH) FOR TB PLEURITIS }\r\n\r\n\r\n\\description{\r\nThis dataset gives the observed cross-tabulation of the test under evaluation (nucleic acid amplification test) and the reference test. 
In studies 1 and 2, the reference test was a culture test, in studies 3 and 4 it was a composite reference test of culture and clinical data (including signs, symptoms and clinical response to empiric TB therapy) and in studies 5 to 11 it was a composite reference test of culture and biopsy.\r\n}\r\n\r\n\\usage{data(In.house)}\r\n\r\n\r\n\\format{\r\n A matrix with 11 observations on the following 4 variables.\r\n \\describe{\r\n \\item{\\code{++}}{Observed individuals who tested positive on both tests }\r\n \\item{\\code{+-}}{Observed individuals who tested positive on the test under evaluation and negative on the reference test}\r\n \\item{\\code{-+}}{Observed individuals who tested negative on the test under evaluation and positive on the reference test}\r\n \\item{\\code{---}}{Observed individuals who tested negative on both tests }\r\n }\r\n}\r\n\r\n\r\n\\references{\r\nPai, M. et al. (2004) \\emph{Nucleic acid amplification in the diagnosis of tuberculous pleuritis: a systematic review and meta-analysis}. \r\nBMC Infect Dis 2004, 4:6.\r\n\r\n}\r\n\r\n\\examples{\r\ndata(In.house)\r\nIn.house\r\n\r\n}\r\n\r\n\\keyword{datasets}\r\n" }, { "alpha_fraction": 0.5927413105964661, "alphanum_fraction": 0.5988398194313049, "avg_line_length": 38.251461029052734, "blob_id": "e2fb4510abd4a93e46abe0bde40aa34c0b20f07f", "content_id": "e510e1b46f92c0ab4ffa7150370c55e795fb3e71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6723, "license_type": "no_license", "max_line_length": 101, "num_lines": 171, "path": "/src/edit_forest_plot_form.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "#import pdb\n#import os\n\nfrom PyQt4.Qt import QObject, SIGNAL\nfrom PyQt4.QtGui import QDialog, QDialogButtonBox\n\nimport forms.ui_edit_forest_plot\nimport ma_specs\nimport meta_py_r\nimport meta_globals\n\nclass EditPlotWindow(QDialog, forms.ui_edit_forest_plot.Ui_edit_forest_plot_dlg):\n\n def __init__(self, img_params_path, png_path, qpixmap_item, parent=None):\n super(EditPlotWindow, self).__init__(parent)\n self.setupUi(self)\n\n # img_params is a string that is the variable\n # name for the R object \n self.img_params_path = img_params_path\n print \"parameters: %s\" % self.img_params_path\n\n # if we're unable to load the required R data files,\n # e.g., because they were moved or deleted, then fail\n self.params_d = meta_py_r.load_vars_for_plot(self.img_params_path, \\\n return_params_dict=True)\n\n\n if not self.params_d:\n print \"can't load R data for plot editing!\"\n return None\n\n # @TODO reflect current params in UI at launch\n #self.populate_params()\n self.set_ui_values()\n\n # this is the QPixMap object that houses the\n # plot image\n self.pixmap_item = qpixmap_item\n\n self.png_path = png_path\n\n # the handle to the window in which\n # the image is being displayed\n self.results_window = parent\n\n self.current_param_vals = {}\n\n # get the button object\n self.apply_button = self.buttonBox.button(QDialogButtonBox.Apply)\n QObject.connect(self.apply_button, SIGNAL(\"clicked()\"), self.regenerate_graph)\n self.populate_params()\n\n\n def set_ui_values(self):\n _to_bool = lambda x: True if x==\"TRUE\" else False\n\n # first fill in the col strs and show fields\n for col_i in [i+1 for i in xrange(4)]:\n cur_col_edit_box = eval(\"self.col%s_str_edit\" % col_i)\n cur_col_edit_box.setText(str(self.params_d[\"fp_col%s_str\" % col_i]))\n\n cur_chk_box = eval(\"self.show_%s\" % col_i)\n 
cur_chk_box.setChecked(self.params_d[\"fp_show_col%s\" % col_i])\n\n\n # x-label\n self.x_lbl_le.setText(str(self.params_d[\"fp_xlabel\"]))\n\n # set the outpath text\n self.image_path.setText(str(self.params_d[\"fp_outpath\"]))\n\n # bounds\n self.plot_lb_le.setText(str(self.params_d[\"fp_plot_lb\"]))\n self.plot_ub_le.setText(str(self.params_d[\"fp_plot_ub\"]))\n \n # xticks\n self.x_ticks_le.setText(str(self.params_d[\"fp_xticks\"]))\n\n ##self.show_summary_line.setChecked(_to_bool(self.params_d[\"fp_show_summary_line\"]))\n self.show_summary_line.setChecked(self.params_d[\"fp_show_summary_line\"])\n\n ###\n # TODO fix issue #153 -- Paul is going to edit the R routine\n # so that it overwrites the params '[default]' values\n # with the generated values used in practice -- we'll\n # just need to write them out here.\n # pyqtRemoveInputHook()\n #pdb.set_trace() \n\n\n def populate_params(self):\n '''\n fill in parameters will current values\n '''\n self.current_param_vals[\"fp_show_col1\"] = self.show_1.isChecked()\n self.current_param_vals[\"fp_col1_str\"] = unicode(self.col1_str_edit.text().toUtf8(), \"utf-8\")\n self.current_param_vals[\"fp_show_col2\"] = self.show_2.isChecked()\n self.current_param_vals[\"fp_col2_str\"] = unicode(self.col2_str_edit.text().toUtf8(), \"utf-8\")\n self.current_param_vals[\"fp_show_col3\"] = self.show_3.isChecked()\n self.current_param_vals[\"fp_col3_str\"] = unicode(self.col3_str_edit.text().toUtf8(), \"utf-8\")\n self.current_param_vals[\"fp_show_col4\"] = self.show_4.isChecked()\n self.current_param_vals[\"fp_col4_str\"] = unicode(self.col4_str_edit.text().toUtf8(), \"utf-8\")\n self.current_param_vals[\"fp_xlabel\"] = unicode(self.x_lbl_le.text().toUtf8(), \"utf-8\")\n self.current_param_vals[\"fp_outpath\"] = unicode(self.image_path.text().toUtf8(), \"utf-8\")\n \n plot_lb = unicode(self.plot_lb_le.text().toUtf8(), \"utf-8\")\n self.current_param_vals[\"fp_plot_lb\"] = \"[default]\"\n if plot_lb != \"[default]\" and meta_globals.check_plot_bound(plot_lb):\n self.current_param_vals[\"fp_plot_lb\"] = plot_lb\n\n plot_ub = unicode(self.plot_ub_le.text().toUtf8(), \"utf-8\")\n self.current_param_vals[\"fp_plot_ub\"] = \"[default]\"\n if plot_ub != \"[default]\" and meta_globals.check_plot_bound(plot_ub):\n self.current_param_vals[\"fp_plot_ub\"] = plot_ub\n\n xticks = unicode(self.x_ticks_le.text().toUtf8(), \"utf-8\")\n self.current_param_vals[\"fp_xticks\"] = \"[default]\"\n if xticks != \"[default]\" and meta_globals.seems_sane(xticks):\n self.current_param_vals[\"fp_xticks\"] = xticks\n \n self.current_param_vals[\"fp_show_summary_line\"] = \\\n self.show_summary_line.isChecked()\n\n\n def swap_graphic(self):\n new_pixmap = self.results_window.generate_pixmap(self.png_path)\n self.pixmap_item.setPixmap(new_pixmap)\n print \"ok -- plot updated in ui\"\n # maybe do something pretty here... ?\n\n def update_plot(self):\n '''\n update the plot parameters to select the user's\n preferences. 
also writes these to disk.\n '''\n # map the ui elements to the corresponding\n # parameter names in the plot params list\n ma_specs.add_plot_params(self)\n\n # load things up in the R side\n meta_py_r.load_vars_for_plot(self.img_params_path)\n\n # update relevant variables (on the R side)\n # with new values -- we also write the updated\n # params out to disk here\n meta_py_r.update_plot_params(self.current_param_vals, \\\n write_them_out=True, \\\n outpath=\"%s.params\" % self.img_params_path)\n\n # now re-generate the plot data on the R side of\n # things\n meta_py_r.regenerate_plot_data()\n\n\n # finally, actually make the plot and spit it to disk\n self.png_path = self.current_param_vals[\"fp_outpath\"]\n meta_py_r.generate_forest_plot(self.png_path)\n\n #meta_py_r.write_out_plot_data(\"%s.plotdata\" % self.img_params_path)\n meta_py_r.write_out_plot_data(\"%s\" % self.img_params_path)\n\n def regenerate_graph(self):\n # this loads the plot.data into R's environment;\n # the variable name will be plot.data\n self.update_plot()\n self.swap_graphic()\n\n # will need to tell it to \n #meta_py_r.generate_forest_plot(self.png_path)\n print \"OK!\"\n\n \n \n\n" }, { "alpha_fraction": 0.5368550419807434, "alphanum_fraction": 0.5466830730438232, "avg_line_length": 30.230770111083984, "blob_id": "210f34a864f87d1e3c7bf33b7961f94be8611018", "content_id": "01e588e12b32cfd4386f55fcdb991fe459f55b57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 814, "license_type": "no_license", "max_line_length": 80, "num_lines": 26, "path": "/src/R/openmetar/R/meta_global.r", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "###############################################################################\n# global value set from python to control confidence level. At the moment, it\n# only affects calc.box.sizes in plotting.R\n\nget.mult.from.conf.level <- function() {\n\talpha <- 1.0-(CONF.LEVEL.GLOBAL/100.0)\n\tmult <- abs(qnorm(alpha/2.0))\n}\n\nset.global.conf.level <- function(conf.level) {\n\tCONF.LEVEL.GLOBAL <<- conf.level\n\t#cat(\"R: Confidence level is now\", CONF.LEVEL.GLOBAL)\n\treturn(CONF.LEVEL.GLOBAL)\n}\n\nget.global.conf.level <- function(NA.if.missing=FALSE) {\n\tif (!(\"CONF.LEVEL.GLOBAL\" %in% ls(envir=globalenv()))) {\n\t\tif (NA.if.missing) {\n\t\t\treturn(NA)\n\t\t} else {\n\t\t\tstop(\"Global confidence level not defined\")\n\t\t}\n\t}\n\treturn(CONF.LEVEL.GLOBAL)\n}\n################################################################################\n\t\n" }, { "alpha_fraction": 0.6751893758773804, "alphanum_fraction": 0.6992424130439758, "avg_line_length": 50.25242614746094, "blob_id": "a04b5ba4f0593ea6e7f49ff49ff3586eb043df96", "content_id": "f97b488fd294c0da3f1fb6d3d1c6220f73802880", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5280, "license_type": "no_license", "max_line_length": 197, "num_lines": 103, "path": "/src/ui_results_window.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'results_window.ui'\n#\n# Created: Fri Apr 12 14:25:48 2013\n# by: PyQt4 UI code generator 4.9.6\n#\n# WARNING! 
All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\nimport qconsole\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_ResultsWindow(object):\n def setupUi(self, ResultsWindow):\n ResultsWindow.setObjectName(_fromUtf8(\"ResultsWindow\"))\n ResultsWindow.resize(799, 544)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n ResultsWindow.setFont(font)\n self.centralwidget = QtGui.QWidget(ResultsWindow)\n self.centralwidget.setObjectName(_fromUtf8(\"centralwidget\"))\n self.verticalLayout = QtGui.QVBoxLayout(self.centralwidget)\n self.verticalLayout.setObjectName(_fromUtf8(\"verticalLayout\"))\n self.splitter = QtGui.QSplitter(self.centralwidget)\n self.splitter.setOrientation(QtCore.Qt.Vertical)\n self.splitter.setObjectName(_fromUtf8(\"splitter\"))\n self.frame = QtGui.QFrame(self.splitter)\n self.frame.setMinimumSize(QtCore.QSize(733, 0))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.frame.setFont(font)\n self.frame.setFrameShape(QtGui.QFrame.StyledPanel)\n self.frame.setFrameShadow(QtGui.QFrame.Raised)\n self.frame.setObjectName(_fromUtf8(\"frame\"))\n self.horizontalLayout = QtGui.QHBoxLayout(self.frame)\n self.horizontalLayout.setObjectName(_fromUtf8(\"horizontalLayout\"))\n self.results_nav_splitter = QtGui.QSplitter(self.frame)\n self.results_nav_splitter.setOrientation(QtCore.Qt.Horizontal)\n self.results_nav_splitter.setObjectName(_fromUtf8(\"results_nav_splitter\"))\n self.nav_tree = QtGui.QTreeWidget(self.results_nav_splitter)\n self.nav_tree.setMaximumSize(QtCore.QSize(16777215, 16777215))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.nav_tree.setFont(font)\n self.nav_tree.setObjectName(_fromUtf8(\"nav_tree\"))\n self.nav_tree.headerItem().setText(0, _fromUtf8(\"1\"))\n self.graphics_view = QtGui.QGraphicsView(self.results_nav_splitter)\n self.graphics_view.setToolTip(_fromUtf8(\"\"))\n self.graphics_view.setObjectName(_fromUtf8(\"graphics_view\"))\n self.horizontalLayout.addWidget(self.results_nav_splitter)\n #self.psuedo_console = QtGui.QTextEdit(self.splitter)\n self.psuedo_console = qconsole.QConsole(self.splitter)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.psuedo_console.sizePolicy().hasHeightForWidth())\n self.psuedo_console.setSizePolicy(sizePolicy)\n self.psuedo_console.setMinimumSize(QtCore.QSize(733, 0))\n self.psuedo_console.setMaximumSize(QtCore.QSize(16777215, 16777215))\n self.psuedo_console.setBaseSize(QtCore.QSize(0, 0))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Terminal\"))\n self.psuedo_console.setFont(font)\n self.psuedo_console.setAutoFillBackground(False)\n self.psuedo_console.setStyleSheet(_fromUtf8(\"background-color: rgb(0, 0, 0);\\n\"\n\"color: rgb(0, 255, 0);\"))\n self.psuedo_console.setLineWrapMode(QtGui.QTextEdit.NoWrap)\n self.psuedo_console.setAcceptRichText(False)\n self.psuedo_console.setObjectName(_fromUtf8(\"psuedo_console\"))\n self.verticalLayout.addWidget(self.splitter)\n ResultsWindow.setCentralWidget(self.centralwidget)\n 
self.menubar = QtGui.QMenuBar(ResultsWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 799, 22))\n self.menubar.setObjectName(_fromUtf8(\"menubar\"))\n ResultsWindow.setMenuBar(self.menubar)\n self.statusbar = QtGui.QStatusBar(ResultsWindow)\n self.statusbar.setObjectName(_fromUtf8(\"statusbar\"))\n ResultsWindow.setStatusBar(self.statusbar)\n\n self.retranslateUi(ResultsWindow)\n QtCore.QMetaObject.connectSlotsByName(ResultsWindow)\n\n def retranslateUi(self, ResultsWindow):\n ResultsWindow.setWindowTitle(_translate(\"ResultsWindow\", \"results / analysis\", None))\n self.psuedo_console.setHtml(_translate(\"ResultsWindow\", \"<!DOCTYPE HTML PUBLIC \\\"-//W3C//DTD HTML 4.0//EN\\\" \\\"http://www.w3.org/TR/REC-html40/strict.dtd\\\">\\n\"\n\"<html><head><meta name=\\\"qrichtext\\\" content=\\\"1\\\" /><style type=\\\"text/css\\\">\\n\"\n\"p, li { white-space: pre-wrap; }\\n\"\n\"</style></head><body style=\\\" font-family:\\'Terminal\\'; font-size:13pt; font-weight:400; font-style:normal;\\\">\\n\"\n\"<p style=\\\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\\\"><span style=\\\" font-size:8pt;\\\">&gt;&gt; </span></p></body></html>\", None))\n\n" }, { "alpha_fraction": 0.5249179601669312, "alphanum_fraction": 0.5310056805610657, "avg_line_length": 37.43119430541992, "blob_id": "0c60215fbcb8968597df79df855808ec75a96b4f", "content_id": "2dca6cccc04260156cb4571d6f65a74ee665001c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16755, "license_type": "no_license", "max_line_length": 120, "num_lines": 436, "path": "/src/calculator_routines.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "###############################################################\n# #\n# George E. Dietz #\n# Byron C. Wallace #\n# #\n# CEBM @ Brown #\n# OpenMeta[analyst] #\n# --- #\n# Binary data form module; for flexible entry of dichotomous #\n# outcome data #\n###############################################################\n\nfrom PyQt4.Qt import *\nfrom functools import partial\n\nfrom meta_globals import *\nimport meta_py_r\n\ndef between_bounds(est=None, low=None, high=None):\n def my_lt(a,b):\n if is_a_float(a) and is_a_float(b):\n return float(a) < float(b)\n else:\n return None\n \n good_result = my_lt(low,est)\n okay = True if not (good_result is None) else False\n if okay and not good_result:\n msg = \"The lower CI must be less than the point estimate!\"\n return False,msg\n \n good_result = my_lt(est,high)\n okay = True if not (good_result is None) else False\n if okay and not good_result:\n msg = \"The higher CI must be greater than the point estimate!\"\n return False,msg\n \n good_result = my_lt(low,high)\n okay = True if not (good_result is None) else False\n if okay and not good_result:\n msg = \"The lower CI must be less than the higher CI!\"\n return False,msg\n \n return True,None\n\ndef cast_to_int(value, name=None):\n '''Converts value to int if possible'''\n try:\n rounded = round(float(value))\n return int(rounded)\n except:\n if not name is None:\n print(\"Could not convert %s='%s' to int\" % (name,str(value)))\n else:\n print(\"Could not convert '%s' to int\" % (str(value)))\n return None\n\ndef compute_2x2_table(params):\n ''' Computes values for the whole 2x2 table if possible based on partial values from the rest of the table'''\n \n # Realized R code is screwy.... 
now for some more screwy code that hopefully works better\n table = [[ params['c11'], params['c12'], params['r1sum']],\n [ params['c21'], params['c22'], params['r2sum']],\n [ params['c1sum'], params['c2sum'], params['total'] ]]\n \n while True:\n changed = False \n for row in range(3):\n for col in range(3):\n # go through row-wise\n if table[row][col] in EMPTY_VALS:\n if col == 0:\n try:\n table[row][col] = table[row][2] - table[row][1]\n changed = True\n except:\n pass\n if col == 1:\n try:\n table[row][col] = table[row][2] - table[row][0]\n changed = True\n except:\n pass\n if col == 2:\n try:\n table[row][col] = table[row][0] + table[row][1]\n changed = True\n except:\n pass\n # and now column-wise\n if table[row][col] in EMPTY_VALS:\n if row == 0:\n try:\n table[row][col] = table[2][col] - table[1][col]\n changed = True\n except:\n pass\n if row == 1:\n try:\n table[row][col] = table[2][col] - table[0][col]\n changed = True\n except:\n pass\n if row == 2:\n try:\n table[row][col] = table[0][col] + table[1][col]\n changed = True\n except:\n pass\n if not changed:\n break\n ## end of big while loop\n \n coef = {}\n coef['c11'] = table[0][0]\n coef['c12'] = table[0][1]\n coef['r1sum'] = table[0][2]\n coef['c21'] = table[1][0]\n coef['c22'] = table[1][1]\n coef['r2sum'] = table[1][2]\n coef['c1sum'] = table[2][0]\n coef['c2sum'] = table[2][1]\n coef['total'] = table[2][2]\n \n return coef\n\n# Consistency checking code for 2x2 tables (binary and diagnostic)\n########################### CONSISTENCY CHECKING CODE ##########################\nclass ConsistencyChecker():\n def __init__(self,fn_consistent=None,fn_inconsistent=None,table_2x2=None):\n functions_passed = (not fn_consistent is None) and (not fn_inconsistent is None)\n assert functions_passed, \"Not enough functions passed to check_for_consistencies\"\n assert not table_2x2 is None, \"No table argument passed.\"\n \n self.inconsistent = False\n self.inconsistent_action = fn_inconsistent\n self.consistent_action = fn_consistent\n self.table = table_2x2\n \n def run(self):\n msg = self.check_for_consistencies()\n \n if not self.inconsistent:\n self._color_all(color=OK_COLOR)\n return msg\n \n def check_for_consistencies(self):\n self.inconsistent = False\n rows_sum = self.check_that_rows_sum() # also colors non-summing rows\n cols_sum = self.check_that_cols_sum()\n all_pos = self.check_that_values_positive()\n \n if self.inconsistent:\n self.inconsistent_action()\n else:\n self.consistent_action()\n \n if not rows_sum:\n return \"Rows must sum!\"\n elif not cols_sum:\n return \"Columns must sum!\"\n elif not all_pos:\n return \"Counts must be positive!\"\n else:\n return None\n \n def check_that_rows_sum(self):\n rows_sum = True\n for row in range(3):\n if self._row_is_populated(row):\n row_sum = 0\n for col in range(2):\n row_sum += self._get_int(row, col)\n if not row_sum == self._get_int(row, 2):\n self._color_row(row)\n self.inconsistent = True\n rows_sum = False\n return rows_sum\n \n def _get_int(self, i, j):\n '''Get value from cell specified by row=i, col=j as an integer'''\n if not self._is_empty_cell(i,j):\n return int(float(self.table.item(i, j).text()))\n else:\n return None # its good to be explicit\n \n def check_that_cols_sum(self):\n cols_sum = True\n for col in range(3):\n if self._col_is_populated(col):\n col_sum = 0\n for row in range(2):\n col_sum += self._get_int(row,col)\n if not col_sum == self._get_int(2,col):\n self._color_col(col)\n self.inconsistent = True\n cols_sum = False\n return cols_sum\n \n def 
check_that_values_positive(self):\n all_positive = True\n \n for row in range(3):\n for col in range(3):\n value = self._get_int(row,col)\n if not value in EMPTY_VALS:\n if value < 0:\n # Color item\n self.table.blockSignals(True)\n self.table.item(row,col).setTextColor(ERROR_COLOR)\n self.table.blockSignals(False)\n # Set flag\n self.inconsistent = True\n all_positive = False\n return all_positive\n \n def _color_all(self, color=ERROR_COLOR):\n self.table.blockSignals(True)\n for row in range(3):\n for col in range(3):\n #print \"setting row: %s, col: %s\" % (row, col)\n item = self.table.item(row, col)\n if item is not None:\n item.setTextColor(color)\n self.table.blockSignals(False)\n \n def _color_row(self, row):\n self.table.blockSignals(True)\n for col in range(3):\n print \"setting row: %s, col: %s\" % (row, col)\n self.table.item(row, col).setTextColor(ERROR_COLOR)\n self.table.blockSignals(False)\n \n def _color_col(self, col):\n self.table.blockSignals(True)\n for row in range(3):\n print \"setting row: %s, col: %s\" % (row, col)\n self.table.item(row, col).setTextColor(ERROR_COLOR)\n self.table.blockSignals(False)\n \n def _row_is_populated(self, row):\n \n result = not True in [self._is_empty_cell(row, col) for col in range(3)]\n if result:\n print \"Row %d is populated\" % row\n return result\n def _col_is_populated(self, col):\n return not True in [self._is_empty_cell(row, col) for row in range(3)]\n \n def _is_empty_cell(self, i, j):\n val = self.table.item(i,j)\n return val is None or val.text() == \"\"\n########################### END CONSISTENCY CHECKER ############################\n\n####### SHARED BINARY, CONTINUOUS, DIAGNOSTIC DATA FORM UTILITY FUNCTIONS ######\ndef enable_txt_box_input(*args):\n ''' Enables text boxes if they are empty, disables them otherwise\n Input is textbox(es) '''\n \n for text_box in args:\n text_box.blockSignals(True)\n \n text_box.setEnabled(False)\n if text_box.text() in EMPTY_VALS:\n text_box.setEnabled(True)\n \n text_box.blockSignals(False)\n \ndef init_ci_spinbox_and_label(ci_spinbox, ci_label, value=None):\n if value is None:\n raise ValueError(\"Confidence level must be specified\")\n \n ci_spinbox.blockSignals(True)\n ci_spinbox.setValue(value)\n ci_label.setText(\"{0:.1f}% Confidence Interval\".format(ci_spinbox.value()))\n ci_spinbox.blockSignals(False)\n \nCHANGE_CI_ALERT_BASE_MSG = (\n \"The size of the confidence level used for a particular study in this \"\n \"calculator need not correspond with the global confidence level \"\n \"(currently set at {0:.1%}) chosen for data display on spreadsheets and \"\n \"forest plots.\")\ndef get_CHANGE_CI_ALERT_MSG(conf_level):\n if conf_level is None:\n raise ValueError(\"Confidence level must be specified\")\n \n return CHANGE_CI_ALERT_BASE_MSG.format(conf_level/100.0)\n\ndef helper_set_current_effect(ma_unit, txt_boxes, current_effect, group_str, data_type, mult=None):\n '''Fills in text boxes on calculator forms with data from ma unit.\n I noticed all 3 set_current_effect functions in the 3 calculators are\n nearly identical so it makes sense to share the similiar parts'''\n \n if mult is None:\n raise ValueError(\"mult must be specified\")\n \n if data_type == \"binary\":\n conv_to_disp_scale = lambda x: meta_py_r.binary_convert_scale(x, current_effect, convert_to=\"display.scale\")\n elif data_type == \"continuous\":\n conv_to_disp_scale = lambda x: meta_py_r.continuous_convert_scale(x, current_effect, convert_to=\"display.scale\")\n elif data_type == \"diagnostic\":\n 
conv_to_disp_scale = lambda x: meta_py_r.diagnostic_convert_scale(x, current_effect, convert_to=\"display.scale\")\n else:\n raise Exception(\"data_type unrecognized\")\n effect_tbox, lower_tbox, upper_tbox = [txt_boxes[box_name] for box_name in (\"effect\",\"lower\",\"upper\")]\n \n (est,lower,upper) = ma_unit.get_effect_and_ci(current_effect, group_str, mult)\n (d_est,d_lower,d_upper) = [conv_to_disp_scale(x) for x in (est,lower,upper)]\n for val, txt_box in zip((d_est,d_lower,d_upper),\n [effect_tbox, lower_tbox, upper_tbox]):\n txt_box.blockSignals(True)\n if val is not None:\n txt_box.setText(QString(\"%s\" % round(val, CALC_NUM_DIGITS)))\n else:\n txt_box.setText(QString(\"\"))\n txt_box.blockSignals(False)\n\ndef save_table_data(table):\n nrows, ncols = table.rowCount(), table.columnCount()\n \n none_row = [None]*ncols\n table_backup = []\n for dummy in range(nrows):\n table_backup.append(none_row[:])\n \n for row in range(nrows):\n for col in range(ncols):\n item = table.item(row, col)\n contents = \"\" if item is None else item.text()\n table_backup[row][col] = contents\n return table_backup\n\nclass CommandFieldChanged(QUndoCommand):\n def __init__(self, restore_new_f = None, restore_old_f = None,\n parent=None, description=\"\"):\n super(CommandFieldChanged, self).__init__(description)\n \n self.parent = parent\n self.just_created = True\n self.restore_new_f = restore_new_f\n self.restore_old_f = restore_old_f\n \n def redo(self):\n if self.just_created:\n self.just_created = False\n self.parent.enable_back_calculation_btn()\n else:\n print(\"Restoring new ma_unit\")\n self.restore_new_f()\n #self.parent.enable_back_calculation_btn() ##\n \n def undo(self):\n print(\"Restoring old ma_unit\")\n self.restore_old_f()\n #self.parent.enable_back_calculation_btn() ##\n\n# Currently unused?\ndef reset_table_item_flags(table):\n nrows = table.rowCount()\n ncols = table.columnCount()\n \n table.blockSignals(True)\n for row in range(nrows):\n for col in range(ncols):\n item = table.item(row, col)\n if not item is None:\n newflags = item.flags() | Qt.ItemIsEditable\n item.setFlags(newflags)\n table.blockSignals(False)\n \ndef block_signals(widgets, state):\n for widget in widgets:\n widget.blockSignals(state)\n\n# Only used in binary and continuous? \ndef get_raw_data(ma_unit, groups):\n raw_data_dict = {}\n for group in groups:\n raw_data = ma_unit.get_raw_data_for_group(group)\n raw_data_dict[group] = raw_data\n return raw_data_dict\n\ndef _input_fields_disabled(table, text_boxes):\n table_disabled = table_cells_editable(table)\n txt_boxes_disabled = _txt_boxes_disabled(text_boxes)\n\n if table_disabled and txt_boxes_disabled:\n return True\n return False\n\ndef table_cells_editable(table):\n cells_uneditable = True\n nrows = table.rowCount()\n ncols = table.columnCount()\n for row in range(nrows):\n for col in range(ncols):\n item = table.item(row, col)\n if item is None:\n continue\n if (item.flags() & Qt.ItemIsEditable) == Qt.ItemIsEditable:\n cells_uneditable = False\n return cells_uneditable\n \ndef _txt_boxes_disabled(text_boxes):\n return not any([box.isEnabled() for box in text_boxes])\n\n# Function for testing validity and range conditions in form txt boxes\ndef evaluate(new_text, ma_unit, curr_effect, group_str, conv_to_disp_scale, ci_param = None,\n parent=None, opt_cmp_fn=None, opt_cmp_msg=None, mult=None):\n '''opt_cmp_fn i.e. 'Optional Compare Function' should return True when the\n desired condition is met and False otherwise. 
It is a function of new_text:\n opt_cmp_fn(new_text)'''\n \n if mult is None:\n raise ValueError(\"mult must be specified\")\n \n est,lower,upper = ma_unit.get_effect_and_ci(curr_effect, group_str, mult) # calc scale\n d_est,d_lower,d_upper = [conv_to_disp_scale(x) for x in (est,lower,upper)]\n is_between_bounds = partial(between_bounds, est=d_est, low=d_lower, high=d_upper)\n ###### ERROR CHECKING CODE#####\n # Make sure entered value is numeric and between the appropriate bounds\n if not is_a_float(new_text) :\n QMessageBox.warning(parent, \"whoops\", \"Must be numeric!\")\n raise Exception(\"error\")\n if not opt_cmp_fn: # est, lower, upper\n (good_result, msg) = is_between_bounds(**{ci_param:new_text})\n if not good_result:\n QMessageBox.warning(parent, \"whoops\", msg)\n raise Exception(\"error\")\n else: # something other than est, lower, upper (like correlation or prevalence)\n print(\"Result of correlation evaluation is: %s\" % str(opt_cmp_fn(new_text)))\n if not opt_cmp_fn(new_text):\n QMessageBox.warning(parent, \"whoops\", opt_cmp_msg)\n print(\"raising exception\")\n raise Exception(\"error\")\n return float(new_text) # display_scale_val" }, { "alpha_fraction": 0.6605113744735718, "alphanum_fraction": 0.6619318127632141, "avg_line_length": 36.105262756347656, "blob_id": "df7a060c9abbe06697364ee2fc2fd7e07752c060", "content_id": "c6eb4a6fcc44ae72a2be94fb024fcd627da621b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 704, "license_type": "no_license", "max_line_length": 80, "num_lines": 19, "path": "/src/edit_group_name_form.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "from PyQt4.Qt import *\nimport forms.ui_edit_group_name\n\nclass EditGroupName(QDialog, forms.ui_edit_group_name.Ui_group_name_dialog):\n \n def __init__(self, cur_group_name, parent=None):\n super(EditGroupName, self).__init__(parent)\n self.setupUi(self)\n self.group_name_le.setText(cur_group_name)\n \n \nclass EditCovariateName(QDialog, forms.ui_edit_group_name.Ui_group_name_dialog):\n \n def __init__(self, cur_cov_name, parent=None):\n super(EditCovariateName, self).__init__(parent)\n self.setupUi(self)\n self.group_name_le.setText(cur_cov_name)\n self.field_lbl.setText(\"covariate name:\")\n self.setWindowTitle(\"edit covariate name\")" }, { "alpha_fraction": 0.5114671587944031, "alphanum_fraction": 0.5299571752548218, "avg_line_length": 38.881839752197266, "blob_id": "906a74131129bf453228a83a6c64d74ed3c01f47", "content_id": "6483612f94e2c608afac50b121fc4c2a5a0fd67c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 18226, "license_type": "no_license", "max_line_length": 214, "num_lines": 457, "path": "/src/R/HSROC/R/HSROC.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "HSROC <-\nfunction (data, iter.num, init = NULL, sub_rs = NULL, first.run = TRUE, \n path = getwd(), refresh = 100, prior.SEref = NULL, prior.SPref = NULL, \n prior_PI = c(0, 1), prior_LAMBDA = c(-3, 3), prior_THETA = c(-1.5, \n 1.5), prior_sd_alpha = list(0, 2, \"sd\"), prior_sd_theta = list(0, \n 2, \"sd\"), prior_beta = c(-0.75, 0.75)) \n{\n if (missing(data)) \n stop(\"You must provide a valid 'data' argument\", call. 
= FALSE)\n N = length(data[, 1])\n Mem.check = N * iter.num * 8\n if (Mem.check > 1.6e+08) {\n print(\"Warning\")\n print(\"You might come into trouble regarding memory allocation if you are using 32-bit version\")\n print(\"Please select one of the options below\")\n switch(menu(c(\"Abord and choose fewer iterations\", \"Ignore this warning\")), \n return(\"Please select fewer iterations\"), NULL)\n }\n if (missing(iter.num) | iter.num <= 0) {\n cat(\"The number of iteration is either missing or less than 1. \\n\", \n call. = FALSE)\n stop(\"Please respecify and call HSROC() again.\\n\")\n }\n if (is.null(sub_rs) == TRUE) {\n sub_rs = list(1, 1:N)\n }\n if (sub_rs[[1]] != (length(sub_rs) - 1)) {\n cat(paste(\"The value of the first element of 'sub_rs' (sub_rs[[1]] = \", \n sub_rs[[1]], \" ) does not match the number of remaining elements (length(sub_rs[[2:\", \n length(sub_rs), \"]])) = \", length(2:length(sub_rs)), \n \"\\n\", sep = \"\"))\n stop(\"Please respecify and call HSROC() again.\\n\")\n }\n if (is.logical(first.run) == FALSE) {\n cat(\"The 'first.run' argument must be a logical object. \\n\")\n stop(\"Please respecify and call HSROC() again.\\n\")\n }\n if (is.null(prior.SEref) == FALSE | is.null(prior.SPref) == \n FALSE) {\n if ((length(prior.SEref)/2 + length(prior.SEref)/2)/2 != \n sub_rs[[1]]) {\n cat(\"The number of reference standards in 'prior.SEref' and(or) 'prior.SPref' is not matching the one defined in the 'sub_rs[[1]]' argument. \\n\")\n stop(\"Please respecify and call HSROC() again.\\n\")\n }\n }\n if (is.null(prior.SEref) == TRUE & is.null(prior.SPref) == \n TRUE) {\n Gold_Std = TRUE\n }\n else {\n Gold_Std = FALSE\n }\n if (is.null(prior.SEref) == TRUE) {\n write(1, file = \"S2.txt\", ncolumns = 1)\n }\n else {\n write(2, file = \"S2.txt\", ncolumns = 1)\n }\n if (is.null(prior.SPref) == TRUE) {\n write(1, file = \"C2.txt\", ncolumns = 1)\n }\n else {\n write(2, file = \"C2.txt\", ncolumns = 1)\n }\n if (is.null(init) == FALSE) {\n random = FALSE\n if (sum(dim(init[[1]])) != N + 5) {\n cat(paste(\"Initial values for the within-study parameters were misspecified. Make sure the ordering described in the help file is preserved. \\n\"))\n stop(\"Please respecify and call HSROC() again.\\n\")\n }\n if (length(init[[2]]) != 5) {\n cat(paste(\"Initial values for the between-study parameters were misspecified. Make sure the ordering described in the help file is preserved. \\n\"))\n stop(\"Please respecify and call HSROC() again.\\n\")\n }\n if (Gold_Std == FALSE) {\n if (sum(dim(init[[3]])) != sub_rs[[1]] + 2) {\n cat(paste(\"Initial values for the test under evaluation were misspecified. Make sure the ordering described in the help file is preserved. \\n\"))\n stop(\"Please respecify and call HSROC() again.\\n\")\n }\n }\n }\n else {\n random = TRUE\n }\n low.pi = prior_PI[1]\n up.pi = prior_PI[2]\n if (all(low.pi < up.pi) == FALSE) {\n cat(\"The 'prior_PI' argument is a vector with 2 components specifying a range. Thus, the first component of the vector must be less than the second component. Type '? HSROC' for more help. \\n\")\n stop(\"Please respecify and call HSROC() again.\\n\")\n }\n prior.LAMBDA.lower = prior_LAMBDA[1]\n prior.LAMBDA.upper = prior_LAMBDA[2]\n if (all(prior.LAMBDA.lower < prior.LAMBDA.upper) == FALSE) {\n cat(\"The 'prior_LAMBDA' argument is a vector with 2 components specifying a range. Thus, the first component of the vector must be less than the second component. Type '? HSROC' for more help. 
\\n\")\n stop(\"Please respecify and call HSROC() again.\\n\")\n }\n prior.THETA.lower = prior_THETA[1]\n prior.THETA.upper = prior_THETA[2]\n if (all(prior.THETA.lower < prior.THETA.upper) == FALSE) {\n cat(\"The 'prior_THETA' argument is a vector with 2 components specifying a range. Thus, the first component of the vector must be less than the second component. Type '? HSROC' for more help. \\n\")\n stop(\"Please respecify and call HSROC() again.\\n\")\n }\n l.disp.alpha = prior_sd_alpha[[1]]\n u.disp.alpha = prior_sd_alpha[[2]]\n if (all(l.disp.alpha < u.disp.alpha) == FALSE & prior_sd_alpha[[3]] != \n \"p\") {\n cat(\"The 'prior_sd_alpha' argument is a list with the first 2 components specifying a range. Thus, the first component of the list must be less than the second component. Type '? HSROC' for more help. \\n\")\n stop(\"Please respecify and call HSROC() again.\\n\")\n }\n l.disp.theta = prior_sd_theta[[1]]\n u.disp.theta = prior_sd_theta[[2]]\n if (all(l.disp.theta < u.disp.theta) == FALSE & prior_sd_theta[[3]] != \n \"p\") {\n cat(\"The 'prior_sd_theta' argument is a list with the first 2 components specifying a range. Thus, the first component of the list must be less than the second component. Type '? HSROC' for more help. \\n\")\n stop(\"Please respecify and call HSROC() again.\\n\")\n }\n if (is.null(prior_beta)) {\n beta.a = -log((prior.LAMBDA.upper/3) + 1)\n beta.b = log((prior.LAMBDA.upper/3) + 1)\n }\n else {\n beta.a = prior_beta[1]\n beta.b = prior_beta[2]\n if (all(beta.a < beta.b) == FALSE) {\n cat(\"The 'prior_beta' argument is a vector with 2 components specifying a range. Thus, the first component of the vector must be less than the second component. Type '? HSROC' for more help. \\n\")\n stop(\"Please respecify and call HSROC() again.\\n\")\n }\n }\n write(1, file = \"model.txt\", ncolumns = 1)\n write(iter.num, file = \"iter.txt\")\n data = list(data)\n file.pi = \"PI.txt\"\n file.C2 = \"Spec2.txt\"\n file.S2 = \"Sens2.txt\"\n file.alpha = \"alpha.txt\"\n file.theta = \"theta.txt\"\n file.sig.theta = \"sigma.theta.txt\"\n file.sig.alpha = \"sigma.alpha.txt\"\n file.THETA = \"capital.THETA.txt\"\n file.LAMBDA = \"LAMBDA.txt\"\n file.beta = \"beta.txt\"\n file.C1 = \"Spec1.txt\"\n file.S1 = \"Sens1.txt\"\n file.C_overall = \"C_overall.txt\"\n file.S_overall = \"S_overall.txt\"\n file.choix = \"choix.txt\"\n file.ll = \"log.likelihood.txt\"\n file.Yj = \"Y_j.txt\"\n file.TV = \"Start_values.txt\"\n file.TV2 = \"Start_values2.txt\"\n file.TV3 = \"Start_REFSTD.txt\"\n file.Restart = \"Restore.txt\"\n file.Restart2 = \"Restore2.txt\"\n file.Restart_REFSTD = \"Restore3.txt\"\n file.Restart_index = \"Restore_index.txt\"\n file.A.alpha = \"A.alpha.txt\"\n file.B.alpha = \"B.alpha.txt\"\n file.mean.rij.one = \"mean.rij.one.txt\"\n file.mean.rij.zero = \"mean.rij.zero.txt\"\n setwd(path)\n condInd = TRUE\n prior_dist_PI = \"beta\"\n range_rij = c(-prior.LAMBDA.upper/2 - 3 * exp(-beta.a/2), \n prior.LAMBDA.upper/2 + 3 * exp(beta.b/2))\n low.rj = range_rij[1]\n up.rj = range_rij[2]\n write(c(low.rj, up.rj), file = \"range of latent variable.txt\", \n ncolumns = 2)\n alpha.PI = beta.parameter(low = low.pi, up = up.pi)[1, ]\n beta.PI = beta.parameter(low = low.pi, up = up.pi)[2, ]\n L.disp.alpha = L.disp.theta = numeric()\n if (l.disp.alpha == 0) {\n L.disp.alpha = 1e-10\n }\n else {\n if (l.disp.alpha > 0) {\n L.disp.alpha = l.disp.alpha\n }\n }\n if (prior_sd_alpha[[3]] == \"sd\") {\n prior_sig_alpha = 1\n low.disp.alpha = u.disp.alpha^(-2)\n up.disp.alpha = 
L.disp.alpha^(-2)\n write(1, file = \"Prior on sigma_alpha.txt\", ncolumns = 1)\n }\n else {\n if (prior_sd_alpha[[3]] == \"v\") {\n prior_sig_alpha = 2\n low.disp.alpha = u.disp.alpha^(-1)\n up.disp.alpha = L.disp.alpha^(-1)\n write(2, file = \"Prior on sigma_alpha.txt\", ncolumns = 1)\n }\n else {\n if (prior_sd_alpha[[3]] == \"p\") {\n prior_sig_alpha = 3\n low.disp.alpha = L.disp.alpha\n up.disp.alpha = u.disp.alpha\n write(3, file = \"Prior on sigma_alpha.txt\", ncolumns = 1)\n }\n }\n }\n if (l.disp.theta == 0) {\n L.disp.theta = 1e-10\n }\n else {\n if (l.disp.theta > 0) {\n L.disp.theta = l.disp.theta\n }\n }\n if (prior_sd_theta[[3]] == \"sd\") {\n prior_sig_theta = 1\n low.disp.theta = u.disp.theta^(-2)\n up.disp.theta = L.disp.theta^(-2)\n write(1, file = \"Prior on sigma_theta.txt\", ncolumns = 1)\n }\n else {\n if (prior_sd_theta[[3]] == \"v\") {\n prior_sig_theta = 2\n low.disp.theta = u.disp.theta^(-1)\n up.disp.theta = L.disp.theta^(-1)\n write(2, file = \"Prior on sigma_theta.txt\", ncolumns = 1)\n }\n else {\n if (prior_sd_theta[[3]] == \"p\") {\n prior_sig_theta = 3\n low.disp.theta = L.disp.theta\n up.disp.theta = u.disp.theta\n write(3, file = \"Prior on sigma_theta.txt\", ncolumns = 1)\n }\n }\n }\n long.se = length(prior.SEref)\n low.se = prior.SEref[1:sub_rs[[1]]]\n up.se = prior.SEref[(sub_rs[[1]] + 1):long.se]\n long.sp = length(prior.SPref)\n low.sp = prior.SPref[1:sub_rs[[1]]]\n up.sp = prior.SPref[(sub_rs[[1]] + 1):long.sp]\n if (Gold_Std == FALSE) {\n if (is.null(prior.SEref) == TRUE) {\n Sens2.alpha = Sens2.beta = NULL\n Gold_se = TRUE\n }\n else {\n if (is.null(prior.SEref) == FALSE) {\n Sens2.alpha = beta.parameter(low = low.se, up = up.se)[1, \n ]\n Sens2.beta = beta.parameter(low = low.se, up = up.se)[2, \n ]\n Gold_se = FALSE\n }\n }\n }\n else {\n if (Gold_Std == TRUE) {\n Sens2.alpha = Sens2.beta = NULL\n Gold_se = NULL\n }\n }\n if (Gold_Std == FALSE) {\n if (is.null(prior.SPref) == TRUE) {\n Spec2.alpha = Spec2.beta = NULL\n Gold_sp = TRUE\n }\n else {\n if (is.null(prior.SPref) == FALSE) {\n Spec2.alpha = beta.parameter(low = low.sp, up = up.sp)[1, \n ]\n Spec2.beta = beta.parameter(low = low.sp, up = up.sp)[2, \n ]\n Gold_sp = FALSE\n }\n }\n }\n else {\n if (Gold_Std == TRUE) {\n Spec2.alpha = Spec2.beta = NULL\n Gold_sp = NULL\n }\n }\n if (first.run == TRUE) {\n RESTART_i = NA\n RESTART = NA\n RESTART_REFSTD = NA\n file.create(file.Restart)\n file.create(file.Restart2)\n file.create(file.Restart_REFSTD)\n }\n else {\n DATA.restart = read.table(file.Restart)\n DATA.restart2 = read.table(file.Restart2)\n DATA.restart_refstd = read.table(file.Restart_REFSTD)\n RESTART_i = t(DATA.restart)\n RESTART = DATA.restart2\n RESTART_REFSTD = DATA.restart_refstd\n }\n PRIOR.Parameters = c(beta.a, beta.b, prior.THETA.lower, prior.THETA.upper, \n prior.LAMBDA.lower, prior.LAMBDA.upper, low.disp.alpha, \n up.disp.alpha, low.disp.theta, up.disp.theta, alpha.PI, \n beta.PI, Sens2.alpha, Sens2.beta, Spec2.alpha, Spec2.beta)\n test.results = data[[1]]\n Start.values = Which_data(RANDOM = random, data = data, init = init, \n GS = Gold_Std)[[1]]\n Start.values2 = Which_data(RANDOM = random, data = data, \n init = init, GS = Gold_Std)[[2]]\n Start.REFSTD = Which_data(RANDOM = random, data = data, init = init, \n GS = Gold_Std)[[3]]\n INITS = Initialization(first.run = first.run, random = random, \n param = PRIOR.Parameters, cond.Ind = condInd, rs = sub_rs, \n GS_se = Gold_se, GS_sp = Gold_sp, Data1 = Start.values, \n Data2 = RESTART_i, Data3 = RESTART, Data4 = 
Start.values2, \n Data5 = Start.REFSTD, Data6 = RESTART_REFSTD, path = path, \n studies = N, sco = FALSE, psa = prior_sd_alpha[[3]], \n pst = prior_sd_theta[[3]])\n if (INITS[[1]][3] == 0 | INITS[[1]][1] == 0) {\n cat(paste(\"Unsuitable initial values were provided. \"))\n stop(\"Please respecify and call HSROC() again.\\n If you're using 'init=NULL' you need just to run the 'HSROC' function again.\\n\")\n }\n init.sigma.alpha = INITS[[1]][3]\n prec.alpha = INITS[[1]][4]\n init.THETA = INITS[[1]][5]\n init.LAMBDA = INITS[[1]][6]\n init.beta = INITS[[1]][7]\n init.alpha = as.vector(INITS[[2]][, 1])\n init.S1 = as.vector(INITS[[2]][, 3])\n init.C1 = as.vector(INITS[[2]][, 4])\n init.PI = as.vector(INITS[[2]][, 5])\n init.sigma.theta = INITS[[1]][1]\n prec.theta = INITS[[1]][2]\n init.theta = as.vector(INITS[[2]][, 2])\n if (Gold_Std == FALSE) {\n if (Gold_se == TRUE) {\n init.C2 = as.vector(INITS[[3]][2, ])\n }\n else {\n if (Gold_sp == TRUE) {\n init.S2 = as.vector(INITS[[3]][1, ])\n }\n else {\n init.S2 = as.vector(INITS[[3]][1, ])\n init.C2 = as.vector(INITS[[3]][2, ])\n }\n }\n }\n D = DATA.organizer(d = test.results, m = N)\n n = D[[1]]\n All.Studies = D[[2]]\n t1 = numeric()\n t2 = numeric()\n T = t(mapply(Test, All.Studies))\n t1 = T[, 1]\n t2 = T[, 2]\n studygroup = rep((1:N), n)\n n_rs = numeric()\n n_REFSTD = REFSTD_3(rs = sub_rs, n.sample = D[[1]])\n studygroup_REFSTD = REFSTD_4(rs = sub_rs, n.sample = D[[1]], \n n_rs = n_REFSTD)\n Total = sum(n)\n n.refstd = sub_rs[[1]]\n PRIOR.BETWEEN = rbind(c(beta.a, beta.b), c(prior.THETA.lower, \n prior.THETA.upper), c(prior.LAMBDA.lower, prior.LAMBDA.upper), \n c(l.disp.alpha, u.disp.alpha), c(l.disp.theta, u.disp.theta), \n c(low.pi, up.pi), c(low.rj, up.rj))\n colnames(PRIOR.BETWEEN) = c(\"Lower bound\", \"Upper bound\")\n rownames(PRIOR.BETWEEN) = c(\"beta\", \"THETA\", \"LAMBDA\", \"sigma_alpha\", \n \"sigma_theta\", \"prevalence\", \"Range_rij\")\n write.table(PRIOR.BETWEEN, file = \"Prior.information.txt\")\n if (Gold_Std == FALSE) {\n if (Gold_se == TRUE) {\n C2.p = c()\n for (i in 1:length(n_REFSTD)) {\n C2.p = c(C2.p, paste(\"C2\", i, sep = \"\"))\n }\n PRIOR.C2 = cbind(low.sp, up.sp)\n rownames(PRIOR.C2) = C2.p\n colnames(PRIOR.C2) = c(\"lower\", \"upper\")\n write.table(PRIOR.C2, file = \"Prior.information.txt\", \n append = TRUE, col.names = FALSE)\n }\n else {\n if (Gold_sp == TRUE) {\n S2.p = c()\n for (i in 1:length(n_REFSTD)) {\n S2.p = c(S2.p, paste(\"S2\", i, sep = \"\"))\n }\n PRIOR.S2 = cbind(low.se, up.se)\n rownames(PRIOR.S2) = S2.p\n colnames(PRIOR.S2) = c(\"lower\", \"upper\")\n write.table(PRIOR.S2, file = \"Prior.information.txt\", \n append = TRUE, col.names = FALSE)\n }\n else {\n S2.p = C2.p = c()\n for (i in 1:length(n_REFSTD)) {\n S2.p = c(S2.p, paste(\"S2\", i, sep = \"\"))\n C2.p = c(C2.p, paste(\"C2\", i, sep = \"\"))\n }\n PRIOR.S2 = cbind(low.se, up.se)\n rownames(PRIOR.S2) = S2.p\n colnames(PRIOR.S2) = c(\"lower\", \"upper\")\n PRIOR.C2 = cbind(low.sp, up.sp)\n rownames(PRIOR.C2) = C2.p\n colnames(PRIOR.C2) = c(\"lower\", \"upper\")\n write.table(PRIOR.S2, file = \"Prior.information.txt\", \n append = TRUE, col.names = FALSE)\n write.table(PRIOR.C2, file = \"Prior.information.txt\", \n append = TRUE, col.names = FALSE)\n }\n }\n }\n vec.PI = as.numeric(init.PI)\n vec.S1 = as.numeric(init.S1)\n vec.C1 = as.numeric(init.C1)\n vec.alpha = as.numeric(init.alpha)\n vec.sigma.alpha = as.numeric(init.sigma.alpha)\n vec.THETA = as.numeric(init.THETA)\n vec.LAMBDA = as.numeric(init.LAMBDA)\n vec.beta = 
as.numeric(init.beta)\n vec.MH = as.numeric(exp(vec.beta))\n vec.sigma.theta = as.numeric(init.sigma.theta)\n vec.theta = as.numeric(init.theta)\n if (Gold_Std == TRUE) {\n vec.S2 = vec.C2 = init.S2 = init.C2 = 1\n Sens2.alpha = Sens2.beta = Spec2.alpha = Spec2.beta = 1\n }\n else {\n if (Gold_se == TRUE) {\n vec.C2 = as.numeric(init.C2)\n vec.S2 = 1\n }\n else {\n if (Gold_sp == TRUE) {\n vec.C2 = 1\n vec.S2 = as.numeric(init.S2)\n }\n else {\n vec.C2 = as.numeric(init.C2)\n vec.S2 = as.numeric(init.S2)\n }\n }\n }\n gibbs = gibbs_sampler_Cpp(iter.num, Gold_Std, Gold_se, Gold_sp, \n Total, t1, t2, init.PI, init.S1, init.S2, init.C1, init.C2, \n n, N, alpha.PI, beta.PI, n.refstd, n_REFSTD, Sens2.alpha, \n Sens2.beta, Spec2.alpha, Spec2.beta, init.alpha, init.theta, \n init.beta, low.rj, up.rj, init.THETA, init.sigma.theta, \n init.sigma.alpha, init.LAMBDA, prior.LAMBDA.lower, prior.LAMBDA.upper, \n beta.a, beta.b, prior.THETA.lower, prior.THETA.upper, \n low.disp.alpha, up.disp.alpha, low.disp.theta, up.disp.theta, \n prior_sig_alpha, prior_sig_theta, refresh)\n Restore(gibbs, Gold_Std)\n if (Gold_Std == TRUE) {\n file.remove(file.C2)\n file.remove(file.S2)\n }\n cat(paste(\"The files created during the Gibbs sampler process are in \\\"\", \n getwd(), \"\\\" \", sep = \"\"))\n}\n" }, { "alpha_fraction": 0.5695652365684509, "alphanum_fraction": 0.5702898502349854, "avg_line_length": 34.33333206176758, "blob_id": "bc9507632433a8d666daf61254fe6a600b1c9a44", "content_id": "bdd7529bb708521121d834aaa09ae2c4e3995710", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1380, "license_type": "no_license", "max_line_length": 82, "num_lines": 39, "path": "/src/diag_metrics.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "from PyQt4.Qt import *\n\nimport forms.ui_diagnostic_metrics\nimport ma_specs\n\nclass Diag_Metrics(QDialog, forms.ui_diagnostic_metrics.Ui_diag_metric):\n\n SELECTABLE_METRICS = [\"sens\", \"spec\", \"dor\", \"lr\"]\n\n def __init__(self, model, parent=None, meta_f_str=None, external_params=None):\n super(Diag_Metrics, self).__init__(parent)\n self.setupUi(self)\n self.model = model\n self.parent = parent\n self.external_params = external_params\n self.meta_f_str = meta_f_str\n QObject.connect(self.btn_ok, SIGNAL(\"pressed()\"), self.ok)\n\n def ok(self):\n form = ma_specs.MA_Specs(self.model, parent=self.parent,\n meta_f_str=self.meta_f_str,\n external_params=self.external_params,\n diag_metrics=self.get_selected_metrics(),\n conf_level=self.model.get_global_conf_level())\n form.show()\n self.hide()\n\n def get_selected_metrics(self):\n selected_metrics = []\n # just loop through all the check\n # boxes on the form and see if they're checked. 
\n\n for metric in self.SELECTABLE_METRICS:\n if eval(\"self.chk_box_%s.isChecked()\" % metric):\n print metric\n selected_metrics.append(metric)\n\n \n return selected_metrics\n\n\n" }, { "alpha_fraction": 0.6533951759338379, "alphanum_fraction": 0.6857717633247375, "avg_line_length": 56.4271354675293, "blob_id": "8a62eef609f9ac3e4453c325377500c90e4fbe9a", "content_id": "764686db60057cc29da9a4e1ca7f4910acaee823", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11428, "license_type": "no_license", "max_line_length": 141, "num_lines": 199, "path": "/src/forms/ui_edit_dialog.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'edit_dialog2.ui'\n#\n# Created: Wed Apr 17 14:37:20 2013\n# by: PyQt4 UI code generator 4.10\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_edit_dialog(object):\n def setupUi(self, edit_dialog):\n edit_dialog.setObjectName(_fromUtf8(\"edit_dialog\"))\n edit_dialog.resize(571, 378)\n self.verticalLayout = QtGui.QVBoxLayout(edit_dialog)\n self.verticalLayout.setObjectName(_fromUtf8(\"verticalLayout\"))\n self.edit_tab = QtGui.QTabWidget(edit_dialog)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.edit_tab.setFont(font)\n self.edit_tab.setObjectName(_fromUtf8(\"edit_tab\"))\n self.tab = QtGui.QWidget()\n self.tab.setObjectName(_fromUtf8(\"tab\"))\n self.gridLayout = QtGui.QGridLayout(self.tab)\n self.gridLayout.setObjectName(_fromUtf8(\"gridLayout\"))\n self.groupBox_2 = QtGui.QGroupBox(self.tab)\n self.groupBox_2.setObjectName(_fromUtf8(\"groupBox_2\"))\n self.outcome_list = QtGui.QListView(self.groupBox_2)\n self.outcome_list.setGeometry(QtCore.QRect(0, 20, 171, 209))\n self.outcome_list.setAlternatingRowColors(True)\n self.outcome_list.setObjectName(_fromUtf8(\"outcome_list\"))\n self.add_outcome_btn = QtGui.QPushButton(self.groupBox_2)\n self.add_outcome_btn.setGeometry(QtCore.QRect(50, 230, 32, 32))\n self.add_outcome_btn.setMinimumSize(QtCore.QSize(32, 32))\n self.add_outcome_btn.setMaximumSize(QtCore.QSize(32, 32))\n self.add_outcome_btn.setText(_fromUtf8(\"\"))\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(_fromUtf8(\":/function_icon_set/function_icon_set/add_48.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.add_outcome_btn.setIcon(icon)\n self.add_outcome_btn.setIconSize(QtCore.QSize(32, 32))\n self.add_outcome_btn.setObjectName(_fromUtf8(\"add_outcome_btn\"))\n self.remove_outcome_btn = QtGui.QPushButton(self.groupBox_2)\n self.remove_outcome_btn.setEnabled(False)\n self.remove_outcome_btn.setGeometry(QtCore.QRect(90, 230, 32, 32))\n self.remove_outcome_btn.setMinimumSize(QtCore.QSize(32, 32))\n self.remove_outcome_btn.setMaximumSize(QtCore.QSize(32, 32))\n self.remove_outcome_btn.setText(_fromUtf8(\"\"))\n icon1 = QtGui.QIcon()\n icon1.addPixmap(QtGui.QPixmap(_fromUtf8(\":/function_icon_set/function_icon_set/cancel_48.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.remove_outcome_btn.setIcon(icon1)\n 
self.remove_outcome_btn.setIconSize(QtCore.QSize(32, 32))\n self.remove_outcome_btn.setObjectName(_fromUtf8(\"remove_outcome_btn\"))\n self.gridLayout.addWidget(self.groupBox_2, 0, 0, 1, 1)\n self.grpbox_follow_ups = QtGui.QGroupBox(self.tab)\n self.grpbox_follow_ups.setObjectName(_fromUtf8(\"grpbox_follow_ups\"))\n self.follow_up_list = QtGui.QListView(self.grpbox_follow_ups)\n self.follow_up_list.setGeometry(QtCore.QRect(0, 20, 171, 209))\n self.follow_up_list.setAlternatingRowColors(True)\n self.follow_up_list.setObjectName(_fromUtf8(\"follow_up_list\"))\n self.remove_follow_up_btn = QtGui.QPushButton(self.grpbox_follow_ups)\n self.remove_follow_up_btn.setEnabled(False)\n self.remove_follow_up_btn.setGeometry(QtCore.QRect(90, 230, 32, 32))\n self.remove_follow_up_btn.setMinimumSize(QtCore.QSize(32, 32))\n self.remove_follow_up_btn.setMaximumSize(QtCore.QSize(32, 32))\n self.remove_follow_up_btn.setText(_fromUtf8(\"\"))\n self.remove_follow_up_btn.setIcon(icon1)\n self.remove_follow_up_btn.setIconSize(QtCore.QSize(32, 32))\n self.remove_follow_up_btn.setObjectName(_fromUtf8(\"remove_follow_up_btn\"))\n self.add_follow_up_btn = QtGui.QPushButton(self.grpbox_follow_ups)\n self.add_follow_up_btn.setGeometry(QtCore.QRect(50, 230, 32, 32))\n self.add_follow_up_btn.setMinimumSize(QtCore.QSize(32, 32))\n self.add_follow_up_btn.setMaximumSize(QtCore.QSize(32, 32))\n self.add_follow_up_btn.setText(_fromUtf8(\"\"))\n self.add_follow_up_btn.setIcon(icon)\n self.add_follow_up_btn.setIconSize(QtCore.QSize(32, 32))\n self.add_follow_up_btn.setObjectName(_fromUtf8(\"add_follow_up_btn\"))\n self.gridLayout.addWidget(self.grpbox_follow_ups, 0, 1, 1, 1)\n self.groupBox = QtGui.QGroupBox(self.tab)\n self.groupBox.setFlat(False)\n self.groupBox.setObjectName(_fromUtf8(\"groupBox\"))\n self.group_list = QtGui.QListView(self.groupBox)\n self.group_list.setGeometry(QtCore.QRect(0, 20, 171, 211))\n self.group_list.setAlternatingRowColors(True)\n self.group_list.setObjectName(_fromUtf8(\"group_list\"))\n self.add_group_btn = QtGui.QPushButton(self.groupBox)\n self.add_group_btn.setGeometry(QtCore.QRect(50, 230, 32, 32))\n self.add_group_btn.setMinimumSize(QtCore.QSize(32, 32))\n self.add_group_btn.setMaximumSize(QtCore.QSize(32, 32))\n self.add_group_btn.setText(_fromUtf8(\"\"))\n self.add_group_btn.setIcon(icon)\n self.add_group_btn.setIconSize(QtCore.QSize(32, 32))\n self.add_group_btn.setObjectName(_fromUtf8(\"add_group_btn\"))\n self.remove_group_btn = QtGui.QPushButton(self.groupBox)\n self.remove_group_btn.setEnabled(False)\n self.remove_group_btn.setGeometry(QtCore.QRect(90, 230, 32, 32))\n self.remove_group_btn.setMinimumSize(QtCore.QSize(32, 32))\n self.remove_group_btn.setMaximumSize(QtCore.QSize(32, 32))\n self.remove_group_btn.setText(_fromUtf8(\"\"))\n self.remove_group_btn.setIcon(icon1)\n self.remove_group_btn.setIconSize(QtCore.QSize(32, 32))\n self.remove_group_btn.setObjectName(_fromUtf8(\"remove_group_btn\"))\n self.gridLayout.addWidget(self.groupBox, 0, 2, 1, 1)\n self.edit_tab.addTab(self.tab, _fromUtf8(\"\"))\n self.tab_2 = QtGui.QWidget()\n self.tab_2.setObjectName(_fromUtf8(\"tab_2\"))\n self.verticalLayout_2 = QtGui.QVBoxLayout(self.tab_2)\n self.verticalLayout_2.setObjectName(_fromUtf8(\"verticalLayout_2\"))\n self.gridLayout_2 = QtGui.QGridLayout()\n self.gridLayout_2.setObjectName(_fromUtf8(\"gridLayout_2\"))\n self.study_list = QtGui.QListView(self.tab_2)\n self.study_list.setAlternatingRowColors(True)\n self.study_list.setObjectName(_fromUtf8(\"study_list\"))\n 
self.gridLayout_2.addWidget(self.study_list, 0, 0, 1, 3)\n self.add_study_btn = QtGui.QPushButton(self.tab_2)\n self.add_study_btn.setMinimumSize(QtCore.QSize(32, 32))\n self.add_study_btn.setMaximumSize(QtCore.QSize(32, 32))\n self.add_study_btn.setText(_fromUtf8(\"\"))\n self.add_study_btn.setIcon(icon)\n self.add_study_btn.setIconSize(QtCore.QSize(32, 32))\n self.add_study_btn.setObjectName(_fromUtf8(\"add_study_btn\"))\n self.gridLayout_2.addWidget(self.add_study_btn, 1, 0, 1, 1)\n self.remove_study_btn = QtGui.QPushButton(self.tab_2)\n self.remove_study_btn.setEnabled(False)\n self.remove_study_btn.setMinimumSize(QtCore.QSize(32, 32))\n self.remove_study_btn.setMaximumSize(QtCore.QSize(32, 32))\n self.remove_study_btn.setText(_fromUtf8(\"\"))\n self.remove_study_btn.setIcon(icon1)\n self.remove_study_btn.setIconSize(QtCore.QSize(32, 32))\n self.remove_study_btn.setObjectName(_fromUtf8(\"remove_study_btn\"))\n self.gridLayout_2.addWidget(self.remove_study_btn, 1, 1, 1, 1)\n self.verticalLayout_2.addLayout(self.gridLayout_2)\n self.edit_tab.addTab(self.tab_2, _fromUtf8(\"\"))\n self.tab_3 = QtGui.QWidget()\n self.tab_3.setObjectName(_fromUtf8(\"tab_3\"))\n self.horizontalLayout = QtGui.QHBoxLayout(self.tab_3)\n self.horizontalLayout.setObjectName(_fromUtf8(\"horizontalLayout\"))\n self.gridLayout_3 = QtGui.QGridLayout()\n self.gridLayout_3.setObjectName(_fromUtf8(\"gridLayout_3\"))\n self.covariate_list = QtGui.QListView(self.tab_3)\n self.covariate_list.setAlternatingRowColors(True)\n self.covariate_list.setObjectName(_fromUtf8(\"covariate_list\"))\n self.gridLayout_3.addWidget(self.covariate_list, 0, 0, 1, 3)\n self.add_covariate_btn = QtGui.QPushButton(self.tab_3)\n self.add_covariate_btn.setMinimumSize(QtCore.QSize(32, 32))\n self.add_covariate_btn.setMaximumSize(QtCore.QSize(32, 32))\n self.add_covariate_btn.setText(_fromUtf8(\"\"))\n self.add_covariate_btn.setIcon(icon)\n self.add_covariate_btn.setIconSize(QtCore.QSize(32, 32))\n self.add_covariate_btn.setObjectName(_fromUtf8(\"add_covariate_btn\"))\n self.gridLayout_3.addWidget(self.add_covariate_btn, 1, 0, 1, 1)\n self.remove_covariate_btn = QtGui.QPushButton(self.tab_3)\n self.remove_covariate_btn.setEnabled(False)\n self.remove_covariate_btn.setMinimumSize(QtCore.QSize(32, 32))\n self.remove_covariate_btn.setMaximumSize(QtCore.QSize(32, 32))\n self.remove_covariate_btn.setText(_fromUtf8(\"\"))\n self.remove_covariate_btn.setIcon(icon1)\n self.remove_covariate_btn.setIconSize(QtCore.QSize(32, 32))\n self.remove_covariate_btn.setObjectName(_fromUtf8(\"remove_covariate_btn\"))\n self.gridLayout_3.addWidget(self.remove_covariate_btn, 1, 1, 1, 1)\n self.horizontalLayout.addLayout(self.gridLayout_3)\n self.edit_tab.addTab(self.tab_3, _fromUtf8(\"\"))\n self.verticalLayout.addWidget(self.edit_tab)\n self.buttonBox = QtGui.QDialogButtonBox(edit_dialog)\n self.buttonBox.setOrientation(QtCore.Qt.Horizontal)\n self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)\n self.buttonBox.setObjectName(_fromUtf8(\"buttonBox\"))\n self.verticalLayout.addWidget(self.buttonBox)\n\n self.retranslateUi(edit_dialog)\n self.edit_tab.setCurrentIndex(0)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\"accepted()\")), edit_dialog.accept)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\"rejected()\")), edit_dialog.reject)\n QtCore.QMetaObject.connectSlotsByName(edit_dialog)\n\n def retranslateUi(self, edit_dialog):\n edit_dialog.setWindowTitle(_translate(\"edit_dialog\", \"edit 
dataset\", None))\n self.groupBox_2.setTitle(_translate(\"edit_dialog\", \"outcomes\", None))\n self.grpbox_follow_ups.setTitle(_translate(\"edit_dialog\", \"follow ups\", None))\n self.groupBox.setTitle(_translate(\"edit_dialog\", \"tx groups\", None))\n self.edit_tab.setTabText(self.edit_tab.indexOf(self.tab), _translate(\"edit_dialog\", \"outcomes/follow-ups/groups\", None))\n self.edit_tab.setTabText(self.edit_tab.indexOf(self.tab_2), _translate(\"edit_dialog\", \"studies\", None))\n self.edit_tab.setTabText(self.edit_tab.indexOf(self.tab_3), _translate(\"edit_dialog\", \"covariates\", None))\n\nimport icons_rc\n" }, { "alpha_fraction": 0.5011718273162842, "alphanum_fraction": 0.5492847561836243, "avg_line_length": 32.383094787597656, "blob_id": "fec86caa6bc1a2c7e12be872d49b5d623111d9ea", "content_id": "16f3e58149e797252d53c8ebbccfffbfc52fa62f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 37121, "license_type": "no_license", "max_line_length": 317, "num_lines": 1112, "path": "/src/R/openmetar/R/data_transform.r", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "isnt.null <- function(x){\n # some syntactic sugar.\n ! is.null(x) \n}\n\nisnt.na <- function(x) {\n\t!is.na(x)\n}\n\nIMAGINARY.THRESHOLD <- 1E-8\n\n\n############################ \n# Binary data calculation #\n############################\n\ngimpute.bin.data <- function(bin.data) {\n\t# Imputes binary 2x2 tables from fields in the bin.data frame parameter\n\t# \n\t# a,b,c and d are the respective entries for the 2-x-2 table. they denote \n\t# treated events, treated total, control events, and control total, respectively\n\t#\n\t# There will be two sets of possible results for each parameter since the solution involves a quadratic\n\t#\n\t# est, lower, upper assumed to be symmetric eg. 
in the log OR scale vs untransformed OR\n\tmetric <- as.character(bin.data[[\"metric\"]])\n\test <- bin.data[[\"estimate\"]]\n\tlower <- bin.data[[\"lower\"]]\n\tupper <- bin.data[[\"upper\"]]\n\t#Ev_A <- bin.data[[\"Ev_A\"]]\n\t#Ev_B <- bin.data[[\"Ev_B\"]]\n\tN_1 <- bin.data[[\"N_A\"]]\n\tN_0 <- bin.data[[\"N_B\"]]\n\tconf.level <- bin.data[[\"conf.level\"]]\n\t\n\t# See if we have enough inputs to proceed\n\test_low_up_ok <- isnt.null(est) & (isnt.null(lower) | isnt.null(upper))\n\test_low_up_ok <- est_low_up_ok | (isnt.null(lower) & isnt.null(upper))\n\tinputs_sufficient <- isnt.null(metric) & isnt.null(N_1) & isnt.null(N_0) &\n\t\t\t\t\t\t est_low_up_ok & isnt.null(conf.level)\n\tif (!inputs_sufficient) {\n\t\tprint(\"Not enough inputs to back-calculate binary table!, exiting gimpute.bin.data..\")\n\t\treturn(list(FAIL=NA))\n\t}\n\t\n\t# Convert NULL to NA (we know the other values are not NULL already)\n\tif (is.null(est)) est <- NA\n\tif (is.null(lower)) lower <- NA\n\tif (is.null(upper)) upper <- NA\n\t\n\talpha <- 1.0-(conf.level/100.0)\n\tmult <- abs(qnorm(alpha/2.0))\n\tn <- N_0 + N_1\n\t\n\t# Calculates the estimate, low, and high if one of the three is NA, assumes\n\t# symmetric distribution\n\tcalc.d.and.b <- function (d=NA, d_L=NA, d_U=NA) {\n\t\tif (is.na(d)) d <- (d_L + d_U)/2;\n\t\tif (is.na(d_U)) d_U <- 2*d - d_L;\n\t\tif (is.na(d_L)) d_L <- 2*d - d_U;\n\t\t\n\t\tb <- ((d_U - d) / mult)^2\n\t\tres <- list(d=d, d_U=d_U, d_L=d_L, b=b)\n\t}\n\n\timpute.from.RD <- function () {\n\t\tres <- calc.d.and.b(d=est, d_L=lower, d_U=upper)\n\t\td <- res[[\"d\"]]; b <- res[[\"b\"]]\n\t\t\n\t\tA <- n;\n\t\tB <- (2*N_0*d-n);\n\t\tC <- N_0*(N_1*b-d*(1-d));\n\t\t\n\t\t# calculate proportions\n\t\tp0.op1 <- (-B+sqrt(B^2-4*A*C))/(2*A)\n\t\tp0.op2 <- (-B-sqrt(B^2-4*A*C))/(2*A)\n\t\tp1.op1 <- d + p0.op1\n\t\tp1.op2 <- d + p0.op2\n\n\t\tres <- list(op1=list(p0=p0.op1, p1=p1.op1), op2=list(p0=p0.op2, p1=p1.op2))\n\t}\n\t\n\timpute.from.LOR <- function () {\n\t\tres <- calc.d.and.b(d=log(est), d_L=log(lower), d_U=log(upper))\n\t\td <- res[[\"d\"]]; b <- res[[\"b\"]]\n\t\t\n\t\td <- exp(d) # convert OR back to normal scale (not log)\n\t\t\n\t\tA <- N_0*(1-d)^2+b*d*N_0*N_1\n\t\tB <- -1*(2*N_0*(1-d)+b*d*N_0*N_1)\n\t\tC <- N_0 + d*N_1\n\t\t\n\t\t# calculate proportions\n\t\tp0.op1 <- (-B+sqrt(B^2-4*A*C))/(2*A) \n\t\tp0.op2 <- (-B-sqrt(B^2-4*A*C))/(2*A) \n\t\tp1.op1 <- d*p0.op1/(d*p0.op1+1-p0.op1)\n\t\tp1.op2 <- d*p0.op2/(d*p0.op2+1-p0.op2)\n\t\t\n\t\tres <- list(op1=list(p0=p0.op1, p1=p1.op1), op2=list(p0=p0.op2, p1=p1.op2))\n\t\treturn(res)\n\t}\n\t\n\timpute.from.LRR <- function () {\n\t\tres <- calc.d.and.b(d=log(est), d_L=log(lower), d_U=log(upper))\n\t\td <- res[[\"d\"]]; b <- res[[\"b\"]]\n\t\t\n\t\td <- exp(d)\n\t\t\n\t\t# calculate proportions\n\t\tp0.op1 <- (N_0+d*N_1)/(d*(b*N_1*N_0+N_1+N_0))\n\t\tp1.op1 <- p0.op1*d\n\t\t\n\t\tres <- list(op1=list(p0=p0.op1, p1=p1.op1))\n\t}\n\t\n\tres <- switch(metric, \"RD\"=impute.from.RD(),\n\t\t\t \"OR\"=impute.from.LOR(),\n\t\t\t\t\t\t \"RR\"=impute.from.LRR())\n\t\n\t# calculate counts for each option\n\t# Option 1:\n\ta <- res$op1$p1 * N_1; a <- round(a, digits=0);\n\tb <- N_1\n\tc <- res$op1$p0 * N_0; c <- round(c, digits=0);\n\td <- N_0\n\top1 <- list(a=a, b=b, c=c, d=d)\n\t# Test for valid answers\n\tif (is.nan(a)|is.nan(b)|is.nan(c)|is.nan(d)) {\n\t\treturn(list(FAIL=NA))\n\t}\n\t\n\t# Option 2:\n\tif (isnt.null(res$op2)) {\n\t\ta <- res$op2$p1 * N_1; a <- round(a, digits=0);\n\t\tb <- N_1 \n\t\tc <- res$op2$p0 * N_0; c <- round(c, 
digits=0);\n\t\td <- N_0\n\t\top2 <- list(a=a, b=b, c=c, d=d)\n\t\t# Test for valid answers\n\t\tif (is.nan(a)|is.nan(b)|is.nan(c)|is.nan(d)) {\n\t\t\treturn(list(FAIL=NA))\n\t\t}\n\t}\n\telse {\n\t\top2 <- NULL;\n\t}\n\n\tif (is.null(op2)) {\n\t\tres <- list(op1=op1)\n\t}\n\telse {\n\t\tres <- list(op1=op1, op2=op2)\n\t}\n\t\n\t\n\treturn(res)\n}\n\n#################################################\n# #\n# Continuous data calculation #\n# --- #\n# The following code is due to Tom Trikalinos. #\n# Originally in fillin.continuous.r file. #\n# #\n#################################################\ncheck.1spell.res <- function(n, se) {\n succeeded <- TRUE\n comment <- \"\"\n\n if (!is.na(n)) {\n if (n<=1) {\n comment <- \"n<=1\"\n succeeded <- FALSE\n } \n }\n\n\tif (!is.na(se)) {\n\t if (se<=0) {\n\t comment <- paste(\"se<=0\", comment, sep=\", \")\n\t succeeded <- FALSE\n\t\t}\n\t}\n\n return(list(succeeded=succeeded, comment=comment))\n\n}\n\n\n########################################################################################\n########################################################################################\n########################################################################################\n########################################################################################\nfillin.cont.1spell <- function(n=NA, mean=NA, sd=NA, se=NA, var=NA, \n low=NA, high=NA, pval=NA, alpha=0.05) { \n\t# var is the SAMPLE variance NOT sampling variance:\n\t# var = sd^2 NOT se^2\n\t# se = sd/sqrt(n)\n succeeded <- FALSE \n comment <- \"\"\n res <- list(succeeded=succeeded)\n\n z <- abs(qnorm(alpha/2))\n\n input.vector <- c(n, mean, sd, se, var, low, high, pval)\n input.pattern <- !(is.na(input.vector))\n\t\n\tget.mean <- function(high=NA, low=NA) {\n\t\tif(is.na(mean))\n\t\t\tmean = (high+low)/2\n\t\treturn(mean)\n\t}\n\t\n\tget.se <- function(sd=NA, n=NA, low=NA, high=NA, mean=NA, pval=NA) {\t\t\n\t\t# try the sd and the n\n\t\tif(is.na(se))\n\t\t\tse <- try( sd/sqrt(n) , silent=TRUE)\n\t\t\n\t\t# try both ends of the CI\n\t\tif(is.na(se))\n\t\t\tse <- try( abs(high-low)/(2*z) ,silent=TRUE)\n\t\t\n\t\t# try low end of CI\n\t\tif(is.na(se))\n\t\t\tse <- try( abs(mean-low)/z ,silent=TRUE)\n\t\t\n\t\t# try high end of CI\n\t\tif(is.na(se))\n\t\t\tse <- try( abs(high-mean)/z ,silent=TRUE)\n\t\t\n\t\t# try the 2 sided p-value for the mean != 0\n\t\tif(is.na(se))\n\t\t\tse <- try( abs(mean)/abs(qnorm(pval/2)) ,silent=TRUE)\n\t\t\n\t\treturn(se)\n\t}\n\t\n\tget.var <- function(sd=NA) {\n\t\t# try sd\n\t\tif (is.na(var))\n\t\t\tvar <- try( sd^2 , silent=TRUE)\n\t\treturn(var)\n\t}\n\t\n\tget.sd <- function(var=NA, n=NA, se=NA) {\n\t\t# try var\n\t\tif (is.na(sd))\n\t\t\tsd <- try( sqrt(var) ,silent=TRUE)\n\t\n\t\t# try se and n\n\t\tif (is.na(sd))\n\t\t\tsd <- try( sqrt(n)*se ,silent=TRUE)\n\t\t\n\t\treturn(sd)\n\t}\n\t\n\tget.n <- function(sd=NA, se=NA, var=NA) {\n\t\tif (is.na(n))\n\t\t\tn <- (sd/se)^2\n\t\tif (is.na(n))\n\t\t\tn <- var/(se^2)\n\t\treturn(n)\n\t}\n\t\n\tdirty <- TRUE\n\twhile (dirty) {\n\t\tprint(\"Iterating in fillin.cont1\")\n\t\tdirty <- FALSE\n\t\t\n\t ##########################################################\n\t # check the mean first \n\t # If not calculate it from the CI\n\t\tif (is.na(mean)) {\n\t \tmean <- get.mean(high=high, low=low)\n\t\t\tif (!is.na(mean)) {\n\t\t\t\tdirty <- TRUE # mean was changed\n\t\t\t\tprint(\"changed mean\")\n\t\t\t}\n\t\t\t\t\n\t\t}\t\n\t ##########################################################\n\t # if se is missing\n\t\tif 
(is.na(se)) {\n\t\t\tse <- get.se(sd=sd, n=n, low=low, high=high, mean=mean, pval=pval)\n\t\t\tif (!is.na(se)) {\n\t\t\t\tdirty <- TRUE # se was changed\n\t\t\t\tprint(\"changed se\")\n\t\t\t}\n\t\t}\n\t ##########################################################\n\t # if the SAMPLE variance is missing\n\t\tif (is.na(var)) {\n\t\t\tvar <- get.var(sd=sd)\n\t\t\tif (!is.na(var)) {\n\t\t\t\tdirty <- TRUE # var was changed\n\t\t\t\tprint(\"changed var\")\n\t\t\t}\n\t\t}\n\t ##########################################################\n\t # if the lower CI is missing \n\t if(is.na(low)) {\n\t low <- mean - z*se\n\t\t\tif (!is.na(low)) {\n\t\t\t\tdirty <- TRUE # low was changed\n\t\t\t\tprint(\"changed low\")\n\t\t\t}\n\t }\n\t ##########################################################\n\t # if the high CI is missing \n\t if(is.na(high)) {\n\t high <- mean + z*se\n\t\t\tif (!is.na(high)) {\n\t\t\t\tdirty <- TRUE # high was changed\n\t\t\t\tprint(\"changed high\")\n\t\t\t}\n\t }\n\t ##########################################################\n\t # if the 2 sided pval is missing \n\t if(is.na(pval)) {\n\t pval <- 2*pnorm(-abs(mean/se))\n\t\t\tif (!is.na(pval)) {\n\t\t\t\tdirty <- TRUE # pval was changed\n\t\t\t\tprint(\"changed pval\")\n\t\t\t}\n\t }\n\t ##########################################################\n\t # if the sd is missing \n\t if(is.na(sd)) {\n\t sd = get.sd(var=var, n=n, se=se)\n\t\t\tif (!is.na(sd)) {\n\t\t\t\tdirty = TRUE # sd was changed\n\t\t\t\tprint(\"changed sd\")\n\t\t\t}\n\t\t}\n\t ##########################################################\n\t # if the n is missing \n\t\tif(is.na(n)) {\n\t\t\tn <- get.n(sd=sd, se=se, var=var)\n\t\t\tif (!is.na(n)) {\n\t\t\t\tdirty <- TRUE # sd was changed\n\t\t\t\tprint(\"changed n\")\n\t\t\t}\n\t\t}\n\t\t\n\t} # finished iterating\n\n\tsucceeded <- check.1spell.res(n=n, se=se)$succeeded\n\tcomment <- check.1spell.res(n=n, se=se)$comment\n\t\n\t# Do checks:\n\tif (is.na(mean)) {\n\t\tcomment <- paste(comment, \"no info on mean\", sep=\"|\")\n\t\t#return(c(res, comment=comment))\n\t}\n\t# if the se is still missing, then abort \n\tif (is.na(se)) {\n\t\tcomment <- paste(comment, \"no info on dispersion\", sep=\"|\")\n\t\t#return(c(res, comment=comment))\n\t}\n\tif(is.na(sd)) {\n\t\tcomment <- paste(comment, \"{n & sd} missing\")\n\t}\n\t\n output.vector <- c(n, mean, sd, se, var, low, high, pval)\n output.names <- c(\"n\", \"mean\", \"sd\", \"se\", \"var\", \"low\", \"high\", \"pval\")\n names(output.vector) <- output.names\n\n res<- list(succeeded=succeeded, input.pattern=input.pattern, output=output.vector, comment=comment)\n return(res)\n\n}\n\n\nfillin.missing.effect.quantity <- function(est=NA, low=NA, high=NA) {\n\t# Assumes CI is symmetric around estimate\n\t\n\t# low = est - diff, est, high = est + diff\n\tdiff <- high-est\n\tif (is.na(diff))\n\t\tdiff <- est - low\n\t\n\tif (is.na(est))\n\t\test <- (high-low)/2.0\n\t\n\tif (is.na(low))\n\t\tlow <- est - diff\n\t\n\tif (is.na(high))\n\t\thigh <- est + diff\n\t\n\treturn(list(est=est, low=low, high=high))\n}\n\ngimpute.cont.data <- function(group1, group2, effect_data, conf.level=95.0) {\n\t# Tries to solve for one of n1,n2, mean1, mean2, sd1, sd2 based on the data\n\t# in group1, group2, effect_data\n\t\n\t# Get 'more' local copies\n\tn1 <- group1[[\"n\"]]\n\tn2 <- group2[[\"n\"]]\n\tmean1 <- group1[[\"mean\"]]\n\tmean2 <- group2[[\"mean\"]]\n\tsd1 <- group1[[\"sd\"]]\n\tsd2 <- group2[[\"sd\"]]\n\test <- effect_data[[\"est\"]]\n\tlow <- effect_data[[\"low\"]]\n\thigh <- 
effect_data[[\"high\"]]\n\tmetric <- effect_data[[\"metric\"]]\n\tmet.param <- effect_data[[\"met.param\"]] # metric specific-parameter\n\t\t\t\n\t# Convert nulls to NA\n\tif (is.null(n1)) n1 <- NA\n\tif (is.null(n2)) n2 <- NA\n\tif (is.null(mean1)) mean1 <- NA\n\tif (is.null(mean2)) mean2 <- NA\n\tif (is.null(sd1)) sd1 <- NA\n\tif (is.null(sd2)) sd2 <- NA\n\tif (is.null(est)) est <- NA\n\tif (is.null(low)) low <- NA\n\tif (is.null(high)) high <- NA\n\tif (is.null(metric)) metric <- NA\n\tif (is.null(met.param)) met.param <- NA\n\tif (is.null(conf.level)) conf.level <- NA\n\t\n\tmetric <- as.character(metric)\n\t\n\t# Can't do anything if we don't know what metric we are using or if we don't\n\t# know the conf.level\n\tif (is.na(metric) | is.na(conf.level) | is.na(met.param)) {\n\t\treturn(list(\"FAIL\"=NA))\n\t}\n\t\n\teffect.and.ci <- fillin.missing.effect.quantity(est=est, low=low, high=high)\n\test <- effect.and.ci[[\"est\"]]\n\tlow <- effect.and.ci[[\"low\"]]\n\thigh <- effect.and.ci[[\"high\"]]\n\t\n\t# Obtain standard error and variance from CI\n\talpha <- 1.0-(conf.level/100.0)\n\tmult <- abs(qnorm(alpha/2.0))\n\tse <- (high-low)/(2*mult)\n\tvar = se^2\n\t\n\t#print(\"se: \"); print(se);\n\t#print(\"var: \"); print(var);\n\t\t\n\tfilter_neg_result <- function(res.vector) {\n\t\t# Ignore negative results, complex number results, and condense all NAs to a single one\n\t\tres.vector <- res.vector[!is.na(res.vector)]\n\t\tres.vector <- res.vector[Re(res.vector) > 0]\n\t\tres.vector <- res.vector[abs(Im(res.vector)) < IMAGINARY.THRESHOLD] # imaginary part is very close to zero\n\t\tres.vector <- Re(res.vector)\n\t\t\n\t\t#print(\"imaginary\"); print(Im(res.vector))\n\t\tif (length(res.vector)==0)\n\t\t\tres.vector <- NA;\n\t\treturn(res.vector)\n\t}\n\t\n\timpute.from.MD <- function() {\n\t\tprint(\"From MD\")\n\t\t# Formulas from \"The Handbook of Research Synthesis and Meta-Analysis\"\n\t # 2nd Ed. p. 
224\n\t\t\n\t\t#######################################################################\n\t\t# If one of the means is missing, solve for other mean\n\t\t# If we are in here, we already know the effect is mean difference\n\t # D = (mean of group 1) - (mean of group 2)\n\t\tD <- est; Y1 <- mean1; Y2 <- mean2;\n\t\t\n\t\tif (is.na(Y1) & isnt.na(Y2))\n\t\t\tY1 <- D + Y2\n\t\tif (is.na(Y2) & isnt.na(Y1))\n\t\t\tY2 <- Y1 - D\n\t\t#######################################################################\n\t\t# For MD, the metric parameter is the assumption that the population SDs\n\t # are the same:\n\t\t# met.param == TRUE # population SDs are the same\n\t # met.param == FALSE # population SDs are not the same\n\t\tif (met.param) { # population SDs are the same\n\t\t\tprint(\"Assuming population SDs are the same\")\n\t\t\tif (is.na(n1)) {\n\t\t\t\t#print(\"n1 is na\")\n\t\t\t\tn1.op1 <- (1/2)*(n2*sd1^2-sd1^2-var*n2^2+2*var*n2+sd2^2*n2-sd2^2+sqrt(var^2*n2^4-4*var^2*n2^3+4*var^2*n2^2+sd1^4+sd2^4+n2^2*sd1^4+2*n2*sd1^4+2*sd1^2*sd2^2+sd2^4*n2^2-2*sd2^4*n2-2*n2^3*sd1^2*var+2*n2^2*sd1^2*var-2*n2^2*sd1^2*sd2^2-4*sd1^2*var*n2+2*var*n2^3*sd2^2+2*var*n2^2*sd2^2-4*var*n2*sd2^2))/(-sd1^2+var*n2)\n\t\t\t\tn1.op2 <- -(1/2)*(-n2*sd1^2+sd1^2+var*n2^2-2*var*n2-sd2^2*n2+sd2^2+sqrt(var^2*n2^4-4*var^2*n2^3+4*var^2*n2^2+sd1^4+sd2^4+n2^2*sd1^4+2*n2*sd1^4+2*sd1^2*sd2^2+sd2^4*n2^2-2*sd2^4*n2-2*n2^3*sd1^2*var+2*n2^2*sd1^2*var-2*n2^2*sd1^2*sd2^2-4*sd1^2*var*n2+2*var*n2^3*sd2^2+2*var*n2^2*sd2^2-4*var*n2*sd2^2))/(-sd1^2+var*n2)\n\t\t\t\tprint(\"n1op1\"); print(n1.op1);\n\t\t\t\tprint(\"n1op2\"); print(n1.op2);\n\t\t\t\tn1.op1 <- round(n1.op1, digits = 0)\n\t\t\t\tn1.op2 <- round(n1.op2, digits = 0)\n\t\t\t\tn1 <- filter_neg_result(c(n1.op1,n1.op2))\n n1 <- round(n1)\n\t\t\t}\n\t\t\tif (is.na(n2)) {\n\t\t\t\t#print(\"n2 is na\")\n\t\t\t\tn2.op1 <- (1/2)*(n1*sd2^2-var*n1^2+2*var*n1+sd1^2*n1-sd1^2-sd2^2+sqrt(sd1^4+sd2^4+2*sd1^2*sd2^2+n1^2*sd2^4+2*n1*sd2^4+var^2*n1^4-4*var^2*n1^3+4*var^2*n1^2+sd1^4*n1^2-2*sd1^4*n1-2*n1^3*sd2^2*var+2*n1^2*sd2^2*var-2*n1^2*sd2^2*sd1^2+2*var*n1^3*sd1^2+2*var*n1^2*sd1^2-4*var*n1*sd1^2-4*var*n1*sd2^2))/(var*n1-sd2^2)\n\t\t\t\tn2.op2 <- -(1/2)*(-n1*sd2^2+var*n1^2-2*var*n1-sd1^2*n1+sd1^2+sd2^2+sqrt(sd1^4+sd2^4+2*sd1^2*sd2^2+n1^2*sd2^4+2*n1*sd2^4+var^2*n1^4-4*var^2*n1^3+4*var^2*n1^2+sd1^4*n1^2-2*sd1^4*n1-2*n1^3*sd2^2*var+2*n1^2*sd2^2*var-2*n1^2*sd2^2*sd1^2+2*var*n1^3*sd1^2+2*var*n1^2*sd1^2-4*var*n1*sd1^2-4*var*n1*sd2^2))/(var*n1-sd2^2)\n\t\t\t\tn2.op1 <- round(n2.op1, digits=0)\n\t\t\t\tn2.op2 <- round(n2.op2, digits=0)\n\t\t\t\tn2 <- filter_neg_result(c(n2.op1, n2.op2))\n n2 <- round(n2)\n\t\t\t}\n\t\t\tif (is.na(sd1)) {\n\t\t\t\t#print(\"sd1 is na\")\n\t\t\t\tsd1.op1 <- sqrt((n1^2-n1+n1*n2-n2)*(var*n1^2*n2+var*n1*n2^2-2*var*n1*n2-n1*sd2^2*n2+n1*sd2^2-sd2^2*n2^2+sd2^2*n2))/(n1^2-n1+n1*n2-n2)\n\t\t\t\tsd1.op2 <- -sqrt((n1^2-n1+n1*n2-n2)*(var*n1^2*n2+var*n1*n2^2-2*var*n1*n2-n1*sd2^2*n2+n1*sd2^2-sd2^2*n2^2+sd2^2*n2))/(n1^2-n1+n1*n2-n2)\n\t\t\t\tsd1 <- filter_neg_result(c(sd1.op1, sd1.op2))\n\t\t\t}\n\t\t\tif (is.na(sd2)) {\n\t\t\t\t#print(\"sd2 is na\")\n\t\t\t\tsd2.op1 <- sqrt((n1*n2-n1+n2^2-n2)*(var*n1^2*n2+var*n1*n2^2-2*var*n1*n2-sd1^2*n1^2+sd1^2*n1-n2*sd1^2*n1+n2*sd1^2))/(n1*n2-n1+n2^2-n2)\n\t\t\t\tsd2.op2 <- -sqrt((n1*n2-n1+n2^2-n2)*(var*n1^2*n2+var*n1*n2^2-2*var*n1*n2-sd1^2*n1^2+sd1^2*n1-n2*sd1^2*n1+n2*sd1^2))/(n1*n2-n1+n2^2-n2)\n\t\t\t\tsd2 <- filter_neg_result(c(sd2.op1, sd2.op2))\n\t\t\t}\n\t\t}\n\t\telse { # population SDs are not the same\n\t\t\tprint(\"Not assuming population SDs are the 
same\")\n\t\t\tif (is.na(n1)) {\n\t\t\t\t#print(\"n1 is na\")\n\t\t\t\tn1 <- n2*sd1^2/(var*n2-sd2^2)\n n1 <- round(n1)\n\t\t\t}\n\t\t\tif (is.na(n2)) {\n\t\t\t\t#print(\"n2 is na\")\n\t\t\t\tn2 <- n1*sd2^2/(var*n1-sd1^2)\n n2 <- round(n2)\n\t\t\t}\n\t\t\tif (is.na(sd1)) {\n\t\t\t\t#print(\"sd1 is na\")\n\t\t\t\tsd1.op1 <- sqrt(n2*n1*(var*n2-sd2^2))/n2\n\t\t\t\tsd1.op2 <- -sqrt(n2*n1*(var*n2-sd2^2))/n2\n\t\t\t\tsd1 <- filter_neg_result(c(sd1.op1, sd1.op2))\n\t\t\t}\n\t\t\tif (is.na(sd2)) {\n\t\t\t\t#print(\"sd2 is na\")\n\t\t\t\tsd2.op1 <- sqrt(n1*n2*(var*n1-sd1^2))/n1\n\t\t\t\tsd2.op2 <- -sqrt(n1*n2*(var*n1-sd1^2))/n1\n\t\t\t\tsd2 <- filter_neg_result(c(sd2.op1, sd2.op2))\n\t\t\t}\n\t\t}\n\t\t\n\t\tres <- list(n1=n1, n2=n2, mean1=Y1, mean2=Y2, sd1=sd1, sd2=sd2)\n\t\treturn(res)\n\t} # end of impute.from.MD\n\t\n\timpute.from.SMD <- function() {\n\t\tprint(\"From SMD\")\n\t\t#######################################################################\n\t\t# If one of the means is missing\t\n\t\tsdw <- sqrt(((n1-1)*sd1^2+(n2-1)*sd2^2)/(n1+n2-2)) # within-groups sd\n\t\tD <- est; Y1 <- mean1; Y2 <- mean2;\n\t\t\n\t\tif (is.na(Y1)) Y1 <- D*sdw+Y2\n\t\tif (is.na(Y2)) Y2 <- -D*sdw+Y1\n\t\t#######################################################################\n\t\t# First try some stuff that does not depend on the metric parameter\n\t\t# From formula: d=(Y1-Y2)/SW, SW^2=((n1-1)*sd1^2+(n2-1)*sd2^2)/(n1+n2-2)\n\t\tif (is.na(n1)) {\n\t\t\tn1 <- -(-sd1^2*D^2+sd2^2*n2*D^2-sd2^2*D^2-n2*Y1^2+2*n2*Y1*Y2-n2*Y2^2+2*Y1^2-4*Y1*Y2+2*Y2^2)/(sd1^2*D^2-Y1^2+2*Y1*Y2-Y2^2)\n n1 <- round(n1)\n\t\t}\n\t\tif (is.na(n2)) {\n\t\t\tn2 <- -(sd1^2*n1*D^2-sd1^2*D^2-sd2^2*D^2-n1*Y1^2+2*n1*Y1*Y2-n1*Y2^2+2*Y1^2-4*Y1*Y2+2*Y2^2)/(sd2^2*D^2-Y1^2+2*Y1*Y2-Y2^2)\n n2 <- round(n2)\n\t\t}\n\t\tif (is.na(sd1)) {\n\t\t\tsd1.op1 <- (sqrt(-(n1-1)*(-n1*Y1^2+2*n1*Y1*Y2+sd2^2*n2*D^2-sd2^2*D^2+2*n2*Y1*Y2-n2*Y2^2-n1*Y2^2-n2*Y1^2+2*Y2^2+2*Y1^2-4*Y1*Y2)))/((n1-1)*D)\n\t\t\tsd1.op2 <- -(sqrt(-(n1-1)*(-n1*Y1^2+2*n1*Y1*Y2+sd2^2*n2*D^2-sd2^2*D^2+2*n2*Y1*Y2-n2*Y2^2-n1*Y2^2-n2*Y1^2+2*Y2^2+2*Y1^2-4*Y1*Y2)))/((n1-1)*D)\n\t\t\tsd1 <- filter_neg_result(c(sd1.op1, sd1.op2))\n\t\t}\n\t\tif (is.na(sd2)) {\n\t\t\tsd2.op1 <- (sqrt(-(n2-1)*(sd1^2*n1*D^2-sd1^2*D^2-n1*Y2^2-n2*Y1^2-n1*Y1^2+2*n1*Y1*Y2+2*Y1^2-4*Y1*Y2+2*n2*Y1*Y2-n2*Y2^2+2*Y2^2)))/((n2-1)*D)\n\t\t\tsd2.op2 <- -(sqrt(-(n2-1)*(sd1^2*n1*D^2-sd1^2*D^2-n1*Y2^2-n2*Y1^2-n1*Y1^2+2*n1*Y1*Y2+2*Y1^2-4*Y1*Y2+2*n2*Y1*Y2-n2*Y2^2+2*Y2^2)))/((n2-1)*D)\n\t\t\tsd2 <- filter_neg_result(c(sd2.op1, sd2.op2))\n\t\t}\n\t\t#######################################################################\n\t\t# For SMD, the metric parameter is whether hedges g is used\n\t\t# met.param == TRUE # SMD is Hedges' g (corrected bias) (default)\n\t # met.param == FALSE # SMD has uncorrected bias\n\t\tif (met.param) { # using Hedges' g\n\t\t\tprint(\"Assuming SMD is Hedges' g\")\n\t\t\tif (is.na(n1)) {\n\t\t\t\ttryCatch({n1 <- polyroot(c(96*n2^3-16*n2^4-144*n2^2, (81*var*n2^2-72*var*n2^3+48*D^2*n2^2-72*D^2*n2+16*var*n2^4-288*n2-64*n2^3+288*n2^2-8*n2^3*D^2), (48*D^2*n2+48*var*n2^3+288*n2-16*D^2*n2^2-144-144*var*n2^2+81*var*n2-96*n2^2), (96+48*var*n2^2-64*n2-8*D^2*n2-72*var*n2), (16*var*n2-16)));\n\t\t\t\t\t}, error = function(e) {\n\t\t\t\t\t\t#print(e);\n\t\t\t\t\t\tn1 <- NA;\n\t\t\t\t\t});\n\t\t\t\tn1 <- filter_neg_result(n1)\n \t\tn1 <- round(n1)\n\t\t\t}\n\t\t\tif (is.na(n2)) {\n\t\t\t\ttryCatch({ n2 <- polyroot(c(96*n1^3-16*n1^4-144*n1^2, (81*var*n1^2-72*var*n1^3+48*D^2*n1^2-72*D^2*n1+16*var*n1^4-288*n1-64*n1^3+288*n1^2-8*n1^3*D^2), 
(48*D^2*n1+48*var*n1^3+288*n1-16*D^2*n1^2-144-144*var*n1^2+81*var*n1-96*n1^2), (96+48*var*n1^2-64*n1-8*D^2*n1-72*var*n1), (16*var*n1-16)));\n\t\t\t\t\t}, error = function(e) {\n\t\t\t\t\t\t#print(e);\n\t\t\t\t\t\tn2 <- NA;\n\t\t\t\t\t});\n\t\t\t\tn2 <- filter_neg_result(n2)\n \t\t\tn2 <- round(n2)\n\t\t\t}\n\t\t}\n\t\telse { # not using Hedges' g\n\t\t\tif (is.na(n1)) {\n\t\t\t\tn1.op1 <- (1/4)*(-2*var*n2+4+D^2+sqrt(4*var^2*n2^2-4*D^2*n2*var+8*D^2+D^4))*n2/(var*n2-1)\n\t\t\t\tn1.op2 <- -(1/4)*(2*var*n2-4-D^2+sqrt(4*var^2*n2^2-4*D^2*n2*var+8*D^2+D^4))*n2/(var*n2-1)\n\t\t\t\tn1.op1 <- round(n1.op1, digits = 0)\n\t\t\t\tn1.op2 <- round(n1.op2, digits = 0)\n\t\t\t\tn1 <- filter_neg_result(c(n1.op1,n1.op2))\n n1 <- round(n1)\n\t\t\t}\n\t\t\tif (is.na(n2)) {\n\t\t\t\tn2.op1 <- (1/4)*(-2*var*n1+D^2+4+sqrt(4*var^2*n1^2-4*var*n1*D^2+D^4+8*D^2))*n1/(-1+var*n1)\n\t\t\t\tn2.op2 <- -(1/4)*(2*var*n1-D^2-4+sqrt(4*var^2*n1^2-4*var*n1*D^2+D^4+8*D^2))*n1/(-1+var*n1)\n\t\t\t\tn2.op1 <- round(n2.op1, digits=0)\n\t\t\t\tn2.op2 <- round(n2.op2, digits=0)\n\t\t\t\tn2 <- filter_neg_result(c(n2.op1, n2.op2))\n n2 <- round(n2)\n\t\t\t}\n\t\t}\n\t\t\n\t\tres <- list(n1=n1, n2=n2, mean1=Y1, mean2=Y2, sd1=sd1, sd2=sd2)\n\t\treturn(res)\n\t} # end of impute.from.smd\n\t\n\tres <- switch(metric, \"MD\"=impute.from.MD(), \"SMD\"=impute.from.SMD())\n\treturn(res)\n}\n\n########################################################################################\n# Tom goes a bit overboard with the '#'s :) #########################################################\n########################################################################################\n########################################################################################\nfillin.cont.AminusB <- function(\n n.A=NA, mean.A=NA, sd.A=NA, se.A=NA, var.A=NA, low.A=NA, high.A=NA, pval.A=NA, \n n.B=NA, mean.B=NA, sd.B=NA, se.B=NA, var.B=NA, low.B=NA, high.B=NA, pval.B=NA,\n correlation = 0, alpha=0.05, metric=NA) {\n\n\tmetric <- as.character(metric)\n\n succeeded <- TRUE \n comment <- \"\"\n res <- list(succeeded= succeeded)\n \n #######\n # anything that's returned needs to be initialized to NA here\n #\n n.diff <- NA\n mean.diff <- NA\n sd.diff <- NA\n se.diff <- NA\n var.diff <- NA\n low.diff <- NA\n high.diff <- NA\n pval.diff <-NA\n \n z <- abs(qnorm(alpha/2))\n\n input.vector.A <- c(n.A, mean.A, sd.A, se.A, var.A, low.A, high.A, pval.A) \n input.vector.B <- c(n.B, mean.B, sd.B, se.B, var.B, low.B, high.B, pval.B)\n\t\n\tcat(\"input.vector.A: \",input.vector.A,\"\\ninput.vector.B: \",input.vector.B,\"\\n\")\n\t\n input.pattern <- list(A=!(is.na(input.vector.A)), B=!(is.na(input.vector.B)))\n\n fillin.A <- fillin.cont.1spell(n.A, mean.A, sd.A, se.A, var.A, low.A, high.A, pval.A, alpha=alpha)\n comment <-paste(comment, paste(\"A\", fillin.A$comment, sep=\":\"), sep=\"|\")\n\n fillin.B <- fillin.cont.1spell(n.B, mean.B, sd.B, se.B, var.B, low.B, high.B, pval.B, alpha=alpha)\n comment <-paste(comment, paste(\"B\", fillin.B$comment, sep=\":\"), sep=\"|\")\n\n\tfillins.succeeded <- identical( c(fillin.A$succeeded,fillin.B$succeeded), c(TRUE, TRUE))\n\t\n\t\n\tif (isnt.na(fillin.A$output[\"n\"]) & is.na(fillin.B$output[\"n\"])) {\n\t\tfillin.B$output[\"n\"] <- fillin.A$output[\"n\"];\n\t}\n\telse if (isnt.na(fillin.B$output[\"n\"]) & is.na(fillin.A$output[\"n\"])) {\n\t\tfillin.A$output[\"n\"] <- fillin.B$output[\"n\"];\n\t}\n\t\n\tnA.eq.nB <- identical(fillin.A$output[\"n\"], fillin.B$output[\"n\"])\n\t\n # you do not need to tryCatch here\n if (fillins.succeeded & 
nA.eq.nB) {\n\t\tn.diff <- fillin.A$output[\"n\"]\n\t\tY1 <- fillin.A$output[\"mean\"]\n\t\tY2 <- fillin.B$output[\"mean\"]\n\t\tS1 <- fillin.A$output[\"sd\"]\n\t\tS2 <- fillin.B$output[\"sd\"]\n\t\tr <- correlation\n\t\t\n\t\tS.difference = sqrt( (S1^2)+(S2^2)-(2*r*S1*S2) )\n\t\t\n\n\t\tif (metric==\"MD\" || metric==\"SMD\") {\n\t\t\t# From Handbook of Research Synthesis and Meta-Analysis 2E p.225\n\t\t\tmean.diff <- Y2-Y1\n\t\t\tsd.diff <- S.difference\n\t\t\t# var.diff <- sd.diff^2/n # not used due to clash in formulas w/fillin.cont.1spell assuming a sample variance\n\t\t\tfillin.diff <- fillin.cont.1spell(n=n.diff, mean=mean.diff, sd=sd.diff, alpha=alpha)\n\t\t\tcat(\"Fillin.diff result: \", fillin.diff$output,\"\\n\")\n\t\t\tif (fillin.diff$succeeded) {\n\t\t\t\tse.diff <- fillin.diff$output[\"se\"]\n\t\t\t\tvar.diff <- fillin.diff$output[\"var\"]\n\t\t\t\tlow.diff <- fillin.diff$output[\"low\"]\n\t\t\t\thigh.diff <- fillin.diff$output[\"high\"]\n\t\t\t\tpval.diff <- fillin.diff$output[\"pval\"]\n\t\t\t}\n\t\t}\n } else {\n\t\tif (!nA.eq.nB)\n\t\t\tcomment <- paste(comment, \" n.A != n.B\")\n succeeded <- FALSE \n }\n\t\n\t\n\n output.vector <- c(n.diff, mean.diff, sd.diff, se.diff, var.diff, low.diff, high.diff, pval.diff)\n output.names <- c( \"n\", \"mean\", \"sd\", \"se\", \"var\", \"low\", \"high\", \"pval\")\n\t\n\tcat(\"Output vector: \",output.vector,\"\\nOutput.names: \",output.names,\"\\n\")\n\t\n names(output.vector) <- output.names\n #names(fillin.A) <- output.names\n #names(fillin.B) <- output.names\n res<- list(succeeded=succeeded, input.pattern=input.pattern, output=output.vector, \n pre=fillin.A$output, post=fillin.B$output,\n comment=comment, correlation=correlation)\n\n return(res)\n\n}\n\n################################ \n# Diagnostic data calculation #\n################################\ngimpute.diagnostic.data <- function(diag.data) {\n\t# imputes diagnostic data (2x2 tables) from fields in diag.data data frame\n\t# paramater. 
This will include (perhaps):\n\t# TP, FN, TN, FP, N, prev, sens, sens.lb, sens.ub, spec, spec.lb, spec.ub,\n\t# alpha\n\t# Ignore the case #s below, just a way i was working things out\n\t\n\t#initialize local variables\n\t\n\tTP <- NULL; FN <- NULL; TN <- NULL; FP <-NULL;\n\t\t\t\n\tN <- diag.data[[\"total\"]]\n\tprev <- diag.data[[\"prev\"]]\n\tsens <- diag.data[[\"sens\"]]\n\tsens.lb <- diag.data[[\"sens.lb\"]]\n\tsens.ub <- diag.data[[\"sens.ub\"]]\n\tspec <- diag.data[[\"spec\"]]\n\tspec.lb <- diag.data[[\"spec.lb\"]]\n\tspec.ub <- diag.data[[\"spec.ub\"]]\n\tconf.level <- diag.data[[\"conf.level\"]]\n\t\n\tcase2a.condition <- isnt.null(sens) & isnt.null(prev) & isnt.null(N)\n\tcase2b.condition <- isnt.null(spec) & isnt.null(prev) & isnt.null(N)\n\t\n\ttmpA <- isnt.null(sens) & (isnt.null(sens.lb) | isnt.null(sens.ub))\n\ttmpB <- isnt.null(sens.lb) & isnt.null(sens.ub)\n\tcase5a.condition <- (tmpA | tmpB) & isnt.null(conf.level)\n\tcase5b.condition <- case5a.condition & isnt.null(spec) & isnt.null(N)\n\t\n\ttmpA <- isnt.null(spec) & (isnt.null(spec.lb) | isnt.null(spec.ub))\n\ttmpB <- isnt.null(spec.lb) & isnt.null(spec.ub)\n\tcase6a.condition <- (tmpA | tmpB) & isnt.null(conf.level)\n\tcase6b.condition <- case6a.condition & isnt.null(sens) & isnt.null(N)\n\t\n\ttmpA <- isnt.null(sens) & (isnt.null(sens.lb) | isnt.null(sens.ub))\n\ttmpA <- tmpA | (isnt.null(sens.lb) & isnt.null(sens.ub))\n\ttmpB <- isnt.null(spec) & (isnt.null(spec.lb) | isnt.null(spec.ub))\n\ttmpB <- tmpB | (isnt.null(spec.lb) & isnt.null(spec.ub))\n\tcase8a.condition <- tmpA & isnt.null(conf.level)\n\tcase8b.condition <- tmpB & isnt.null(conf.level)\n\t\n\t# Case 2: inputs: sens, spec, prev, N\n\tcase2 <- function(sens, spec, prev, N) {\n\t\tTP <- sens*prev*N\n\t\tFN <- (1-sens)*prev*N\n\t\tFP <- N*(spec-1)*(prev-1)\n\t\tTN <- N*spec*(1-prev)\n\t\t\n\t\tlist(TP=TP,FP=FP,TN=TN,FN=FN)\n\t}\n\t# Case 5: inputs: sens, sens.lb or sens.ub, spec, N, conf.level\n\tcase5 <- function(sens, sens.lb, sens.ub, spec, N, conf.level) {\n\t\tci.data <- list(estimate=sens, lb=sens.lb, ub=sens.ub, conf.level=conf.level)\n\t\test.var <- calc.est.var(ci.data)\n\t\tvarLogitSENS <- est.var$var\n\t\tsens <- est.var$estimate\n\t\t\n\t\tTP = -1/(varLogitSENS*(sens-1))\n\t\tFP = -(-1+spec)*(varLogitSENS*sens^2*N-varLogitSENS*sens*N+1)/(varLogitSENS*sens*(sens-1))\n\t\tTN = spec*(varLogitSENS*sens^2*N-varLogitSENS*sens*N+1)/(varLogitSENS*sens*(sens-1))\n\t\tFN = 1/(varLogitSENS*sens)\n\t\t\n\t\tlist(TP=TP,FP=FP,TN=TN,FN=FN)\n\t}\n\t\n\t\n\t# Case 6: inputs: spec, spec.lb or spec.ub, sens, N, conf.level\n\tcase6 <- function(spec, spec.lb, spec.ub, sens, N, conf.level) {\n\t\tci.data <- list(estimate=spec, lb=spec.lb, ub=spec.ub, conf.level=conf.level)\n\t\test.var <- calc.est.var(ci.data)\n\t\tvarLogitSPEC <- est.var$var\n\t\tspec <- est.var$estimate\n\t\t\n\t\tTP = sens*(-1*varLogitSPEC*spec*N+varLogitSPEC*spec^2*N+1)/(varLogitSPEC*spec*(-1+spec))\n\t\tFP = 1/(varLogitSPEC*spec)\n\t\tTN = -1/(varLogitSPEC*(-1+spec))\n\t\tFN = -(sens-1)*(-1*varLogitSPEC*spec*N+varLogitSPEC*spec^2*N+1)/(varLogitSPEC*spec*(-1+spec))\n\t\t\n\t\tlist(TP=TP,FP=FP,TN=TN,FN=FN)\n\t}\n\t\n\t# Case 8: inputs sens, sens.lb or sens.ub, spec, spec.lb or spec.ub, conf.level\n\tcase8 <- function(sens, sens.lb, sens.ub, spec, spec.lb, spec.ub, conf.level) {\n\t\tci.data <- list(estimate=sens, lb=sens.lb, ub=sens.ub, conf.level=conf.level)\n\t\test.var <- calc.est.var(ci.data)\n\t\tvarLogitSENS <- est.var$var\n\t\tsens <- est.var$estimate\n\t\t\n\t\tci.data <- 
list(estimate=spec, lb=spec.lb, ub=spec.ub, conf.level=conf.level)\n\t\test.var <- calc.est.var(ci.data)\n\t\tvarLogitSPEC <- est.var$var\n\t\tspec <- est.var$estimate\n\t\t\n\t\tTP = -1/(varLogitSENS*(sens-1))\n\t\tFP = 1/(varLogitSPEC*spec)\n\t\tTN = -1/(varLogitSPEC*(-1+spec))\n\t\tFN = 1/(varLogitSENS*sens)\n\t\n\t\tlist(TP=TP,FP=FP,TN=TN,FN=FN)\n\t}\n\n\n\t\n\tcase2res <- case2(sens, spec, prev, N)\n\tcase5res <- case5(sens, sens.lb, sens.ub, spec, N, conf.level)\n case6res <- case6(spec, spec.lb, spec.ub, sens, N, conf.level)\n\tcase8res <- case8(sens, sens.lb, sens.ub, spec, spec.lb, spec.ub, conf.level)\n\n\t# TP,FN\n\tif (case2a.condition) {\n\t\t#print(\"Entering 2a\")\n\t\tTP <- if(is.null(TP)) case2res$TP\n\t\tFN <- if(is.null(FN)) case2res$FN\n\t} else if (case5a.condition) {\n\t\t#print(\"Entering 5a\")\n\t\tTP <- if(is.null(TP)) case5res$TP\n\t\tFN <- if(is.null(FN)) case5res$FN\n\t} else if (case6b.condition) {\n\t\t#print(\"Entering 6b\")\n\t\tTP <- if(is.null(TP)) case6res$TP\n\t\tFN <- if(is.null(FN)) case6res$FN\n\t} else if (case8a.condition) {\n\t\t#print(\"Entering 8a\")\n\t\tTP <- if(is.null(TP)) case8res$TP\n\t\tFN <- if(is.null(FN)) case8res$FN\n\t}\n\t\n\t# TN,FP\n\tif (case2b.condition) {\n\t\t#print(\"Entering 2b\")\n\t\tTN <- if(is.null(TN)) case2res$TN\n\t FP <- if(is.null(FP)) case2res$FP\n\t} else if (case5b.condition) {\n\t\t#print(\"Entering 5b\")\n\t\tTN <- if(is.null(TN)) case5res$TN\n\t\tFP <- if(is.null(FP)) case5res$FP\n\t} else if (case6a.condition) {\n\t\t#print(\"Entering 6a\")\n\t\tTN <- if(is.null(TN)) case6res$TN\n\t\tFP <- if(is.null(FP)) case6res$FP\n\t} else if (case8b.condition) {\n\t\t#print(\"Entering 8b\")\n\t\tTN <- if(is.null(TN)) case8res$TN\n\t\tFP <- if(is.null(FP)) case8res$FP\n\t}\n\t\n\t# Convert NULL to NA for fun, also other reasons?\n\tif(is.null(TP)) {\n \tTP <- NA\n\t}\n\tif(is.null(FN)) {\n \tFN <- NA\n\t}\n\tif(is.null(TN)) {\n \tTN <- NA\n\t}\n\tif(is.null(FP)) {\n \tFP <- NA\n\t}\n\t\n\t# calculate rounding error\n\tTP.rnd.err <- abs(TP-round(TP,digits=0))\n\tFN.rnd.err <- abs(FN-round(FN,digits=0))\n\tTN.rnd.err <- abs(TN-round(TN,digits=0))\n\tFP.rnd.err <- abs(FP-round(FP,digits=0))\n\t\n\tTP <- round(TP,digits=0)\n\tFN <- round(FN,digits=0)\n\tTN <- round(TN,digits=0)\n\tFP <- round(FP,digits=0)\n\t\n\t# return\n\tlist(TP=TP,\n\t\t FN=FN,\n\t\t TN=TN,\n\t\t FP=FP,\n\t\t TP.rnd.err=TP.rnd.err,\n\t\t FN.rnd.err=FN.rnd.err,\n\t\t TN.rnd.err=TN.rnd.err,\n\t\t FP.rnd.err=FP.rnd.err)\n}\n\ncalc.est.var <- function(ci.data) {\n # calculate estimate and variance given any two of the following: estimate, ci lower bound, ci upper bound.\n #\n est.var <- list()\n alpha <- 1.0-(ci.data$conf.level/100.0)\n mult <- abs(qnorm(alpha/2.0))\n if (isnt.null(ci.data$estimate)) {\n # if estimate is there, use it.\n if (isnt.null(ci.data$lb)) {\n est.var$estimate <- ci.data$estimate\n var <- ((logit(ci.data$estimate) - logit(ci.data$lb)) / mult)^2\n est.var$var <- var\n } else if (isnt.null(ci.data$ub)) {\n est.var$estimate <- ci.data$estimate\n var <- ((logit(ci.data$ub) - logit(ci.data$estimate)) / mult)^2\n est.var$var <- var\n }\n } else if (isnt.null(ci.data$lb) & isnt.null(ci.data$ub)) {\n # estimate isn't there.\n radius <- (logit(ci.data$ub) - logit(ci.data$lb)) / 2\n estimate <- invlogit(logit(ci.data$lb) + radius)\n est.var$estimate <- estimate\n var <- (radius / mult)^2\n est.var$var <- var\n }\n est.var\n}\n\nrescale.effect.and.ci.conf.level <- function(dataf.arg) {\n\t# Rescales est,low,high to target confidence level\n\t# 
dataf.arg is a dataframe of arguments\n\t\n\t# est, low, high are assumed to be on the calc scale\n\t# returns rescaled est,low,high also on calc scale\n\test = dataf.arg[[\"est\"]]\n\tlow = dataf.arg[[\"low\"]]\n\thigh = dataf.arg[[\"high\"]] \n\torig.conf.level = dataf.arg[[\"orig.conf.level\"]]\n\ttarget.conf.level = dataf.arg[[\"target.conf.level\"]]\n\t\n\t# Convert NULL to NA\n\tif (is.null(est)) est <- NA\n\tif (is.null(low)) low <- NA\n\tif (is.null(high)) high <- NA\n\t\n\t# make sure we have the right inputs\n\tnum_na = 0\n\tif (is.na(est)) {num_na = num_na + 1}\n\tif (is.na(low)) {num_na = num_na + 1}\n\tif (is.na(high)) {num_na = num_na + 1}\n\t\n\tif ((num_na > 1) || is.na(orig.conf.level) || is.na(target.conf.level)) {\n\t\treturn(list(\"FAIL\"=NA)) # failure\n\t}\n\t\n\t# make sure est, low, high are all not NA\n\tif (is.na(est)) {\n\t\test <- (high-low)/2.0\n\t}\n\n\tif (is.na(low)) {\n\t\tlow <- est - (high-est)\n\t}\n\n\tif (is.na(high)) {\n\t\thigh <- est + (est - low)\t\n\t}\n\n\told.alpha <- 1.0-(orig.conf.level/100.0)\n\tnew.alpha <- 1.0-(target.conf.level/100.0)\n\told.mult <- abs(qnorm(old.alpha/2.0))\n\tnew.mult <- abs(qnorm(new.alpha/2.0))\n\t\n\tse <- (high-low)/(2*old.mult)\n\t\n\tnew.est <- est\n\tnew.low <- new.est - new.mult*se\n\tnew.high <- new.est + new.mult*se\n\t\n\treturn(list(est=new.est, low=new.low, high=new.high))\n}\n\n\n\n########### OLD DIAGNOSTIC FUNCTIONS NOT CURRENTLY USED ########################\n# Do not delete them yet, they are good for reference #\n\n#impute.diagnostic.data <- function(diag.data, metric){\n# # this function imputes diagnostic data (i.e., 2x2 tables) from the fields\n# # available in the diagnostic.data data frame parameter.\n# #\n# if (metric==\"Sens\") {\n# ci.data <- list(estimate=diag.data$sens, lb=diag.data$sens.lb, ub=diag.data$sens.ub, conf.level=diag.data$conf.level)\n# est.var <- calc.est.var(ci.data)\n# # fill in estimate, ci.lb, ci.ub\n# diag.data$sens <- est.var$estimate\n# diag.data$sens.var <- est.var$var\n# diag.data <- calc.sens.data(diag.data)\n# } else if (metric==\"Spec\") {\n# ci.data <- list(estimate=diag.data$spec, lb=diag.data$spec.lb, ub=diag.data$spec.ub, conf.level=diag.data$conf.level)\n# est.var <- calc.est.var(ci.data)\n# # fill in estimate, ci.lb, ci.ub\n# diag.data$spec <- est.var$estimate\n# diag.data$spec.var <- est.var$var\n# diag.data <- calc.spec.data(diag.data)\n# } \n# diag.data \n#}\n\n#calc.sens.data <- function(diag.data) {\n# # back-calculate TP, FN, sens, var from any two known values\n# # Notes: TP = 1 / (( 1-sens) * var)\n# # FN = 1 / (sens * var)\n# # TP / FN = sens / (1-sens)\n# # FN / TP = (1-sens) / sens\n# #\n# TP<-NULL; FN<-NULL; TN<-NULL; FP<-NULL; sens<-NULL; sens.var<-NULL\n# \n# \n# if (isnt.null(diag.data$sens) & isnt.null(diag.data$sens.var)) {\n# sens <- diag.data$sens\n# sens.var <- diag.data$sens.var\n# diag.data$TP <- round(1 / ((1-sens) * sens.var), digits=0)\n# diag.data$FN <- round(1 / (sens * sens.var), digits=0)\n# } else if (isnt.null(diag.data$sens) & isnt.null(diag.data$TP)) {\n# sens <- diag.data$sens \n# TP <- diag.data$TP\n# diag.data$FN <- round(((1 - sens) / sens) * TP, digits=0)\n# diag.data$sens.var <- 1 / ((1 - sens) * TP)\n# } else if (isnt.null(diag.data$sens) & isnt.null(diag.data$FN)) {\n# sens <- diag.data$sens \n# FN <- diag.data$FN\n# diag.data$TP <- round((sens / (1 - sens)) * FN, digits=0)\n# diag.data$sens.var <- 1 / (sens * FN)\n# } else if (isnt.null(diag.data$sens.var) & isnt.null(diag.data$TP)) {\n# sens.var <- diag.data$sens.var 
\n# TP <- diag.data$TP\n# sens <- 1 - 1/(TP * sens.var)\n# diag.data$sens <- sens\n# diag.data$FN <- round(1 / (sens * sens.var), digits=0)\n# } else if (isnt.null(diag.data$sens.var) & isnt.null(diag.data$FN)) {\n# sens.var <- diag.data$sens.var \n# FN <- diag.data$FN\n# sens <- 1 / (FN * sens.var)\n# diag.data$sens <- sens\n# diag.data$TP <- round(1 / ((1-sens) * sens.var), digits=0)\n# } \n# diag.data\n#}\n#\n#calc.spec.data <- function(diag.data) {\n# # back-calculate TN, FP, spec, var from any two known values\n# # Note: This function is identical to calc.sens.data with the following substitutions:\n# # sens -> spec\n# # TP -> TN\n# # FN -> FP\n# # Could combine into one function, but it would be less readable. \n# #\n# # Notes: TN = 1 / (( 1-spec) * var)\n# # FP = 1 / (spec * var)\n# # TN / FP = spec / (1-spec)\n# # FP / TN = (1-spec) / spec\n# #\n# TN<-NULL; FP<-NULL; TN<-NULL; FP<-NULL; spec<-NULL; spec.var<-NULL\n# \n# \n# if (isnt.null(diag.data$spec) & isnt.null(diag.data$spec.var)) {\n# spec <- diag.data$spec\n# spec.var <- diag.data$spec.var\n# diag.data$TN <- round(1 / ((1-spec) * spec.var), digits=0)\n# diag.data$FP <- round(1 / (spec * spec.var), digits=0)\n# } else if (isnt.null(diag.data$spec) & isnt.null(diag.data$TN)) {\n# spec <- diag.data$spec \n# TN <- diag.data$TN\n# diag.data$FP <- round(((1 - spec) / spec) * TN, digits=0)\n# diag.data$spec.var <- 1 / ((1 - spec) * TN)\n# } else if (isnt.null(diag.data$spec) & isnt.null(diag.data$FP)) {\n# spec <- diag.data$spec \n# FP <- diag.data$FP\n# diag.data$TN <- round((spec / (1 - spec)) * FP, digits=0)\n# diag.data$spec.var <- 1 / (spec * FP)\n# } else if (isnt.null(diag.data$spec.var) & isnt.null(diag.data$TN)) {\n# spec.var <- diag.data$spec.var \n# TN <- diag.data$TN\n# spec <- 1 - 1/(TN * spec.var)\n# diag.data$spec <- spec\n# diag.data$FP <- round(1 / (spec * spec.var), digits=0)\n# } else if (isnt.null(diag.data$spec.var) & isnt.null(diag.data$FP)) {\n# spec.var <- diag.data$spec.var \n# FP <- diag.data$FP\n# spec <- 1 / (FP * spec.var)\n# diag.data$spec <- spec\n# diag.data$TN <- round(1 / ((1-spec) * spec.var), digits=0)\n# } \n# diag.data\n#}" }, { "alpha_fraction": 0.6627326607704163, "alphanum_fraction": 0.6865637898445129, "avg_line_length": 48.505619049072266, "blob_id": "419e6fe82153ed65d9ce7491af85594d55aeebca", "content_id": "c17df6a2e497310a95f0a0c963d927b828334daf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4406, "license_type": "no_license", "max_line_length": 106, "num_lines": 89, "path": "/src/forms/ui_diagnostic_metrics.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'diagnostic_metrics.ui'\n#\n# Created: Wed Apr 17 14:37:20 2013\n# by: PyQt4 UI code generator 4.10\n#\n# WARNING! 
All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_diag_metric(object):\n def setupUi(self, diag_metric):\n diag_metric.setObjectName(_fromUtf8(\"diag_metric\"))\n diag_metric.setWindowModality(QtCore.Qt.ApplicationModal)\n diag_metric.resize(348, 160)\n diag_metric.setMinimumSize(QtCore.QSize(250, 140))\n diag_metric.setMaximumSize(QtCore.QSize(10000, 10000))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n diag_metric.setFont(font)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(_fromUtf8(\":/images/meta.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n diag_metric.setWindowIcon(icon)\n self.gridLayout_2 = QtGui.QGridLayout(diag_metric)\n self.gridLayout_2.setObjectName(_fromUtf8(\"gridLayout_2\"))\n self.metrics_grp_box = QtGui.QGroupBox(diag_metric)\n self.metrics_grp_box.setObjectName(_fromUtf8(\"metrics_grp_box\"))\n self.verticalLayout = QtGui.QVBoxLayout(self.metrics_grp_box)\n self.verticalLayout.setObjectName(_fromUtf8(\"verticalLayout\"))\n self.gridLayout = QtGui.QGridLayout()\n self.gridLayout.setObjectName(_fromUtf8(\"gridLayout\"))\n self.chk_box_sens = QtGui.QCheckBox(self.metrics_grp_box)\n self.chk_box_sens.setChecked(True)\n self.chk_box_sens.setObjectName(_fromUtf8(\"chk_box_sens\"))\n self.gridLayout.addWidget(self.chk_box_sens, 0, 0, 1, 1)\n self.chk_box_spec = QtGui.QCheckBox(self.metrics_grp_box)\n self.chk_box_spec.setChecked(True)\n self.chk_box_spec.setObjectName(_fromUtf8(\"chk_box_spec\"))\n self.gridLayout.addWidget(self.chk_box_spec, 0, 1, 1, 1)\n self.chk_box_lr = QtGui.QCheckBox(self.metrics_grp_box)\n self.chk_box_lr.setChecked(True)\n self.chk_box_lr.setObjectName(_fromUtf8(\"chk_box_lr\"))\n self.gridLayout.addWidget(self.chk_box_lr, 2, 0, 1, 1)\n self.chk_box_dor = QtGui.QCheckBox(self.metrics_grp_box)\n self.chk_box_dor.setChecked(True)\n self.chk_box_dor.setObjectName(_fromUtf8(\"chk_box_dor\"))\n self.gridLayout.addWidget(self.chk_box_dor, 2, 1, 1, 1)\n spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)\n self.gridLayout.addItem(spacerItem, 1, 0, 1, 1)\n self.verticalLayout.addLayout(self.gridLayout)\n self.gridLayout_2.addWidget(self.metrics_grp_box, 0, 0, 1, 1)\n self.horizontalLayout = QtGui.QHBoxLayout()\n self.horizontalLayout.setObjectName(_fromUtf8(\"horizontalLayout\"))\n spacerItem1 = QtGui.QSpacerItem(200, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.horizontalLayout.addItem(spacerItem1)\n self.btn_ok = QtGui.QPushButton(diag_metric)\n self.btn_ok.setMaximumSize(QtCore.QSize(75, 23))\n self.btn_ok.setObjectName(_fromUtf8(\"btn_ok\"))\n self.horizontalLayout.addWidget(self.btn_ok)\n self.gridLayout_2.addLayout(self.horizontalLayout, 1, 0, 1, 1)\n\n self.retranslateUi(diag_metric)\n QtCore.QMetaObject.connectSlotsByName(diag_metric)\n\n def retranslateUi(self, diag_metric):\n diag_metric.setWindowTitle(_translate(\"diag_metric\", \"Diagnostic Metrics\", None))\n self.metrics_grp_box.setTitle(_translate(\"diag_metric\", \"select metrics for analysis\", None))\n self.chk_box_sens.setText(_translate(\"diag_metric\", 
\"sensitivity\", None))\n self.chk_box_spec.setText(_translate(\"diag_metric\", \"specificity\", None))\n self.chk_box_lr.setText(_translate(\"diag_metric\", \"likelihood ratio\", None))\n self.chk_box_dor.setText(_translate(\"diag_metric\", \"diagnostic odds ratio\", None))\n self.btn_ok.setText(_translate(\"diag_metric\", \"next >\", None))\n\nimport icons_rc\n" }, { "alpha_fraction": 0.5724847316741943, "alphanum_fraction": 0.576877772808075, "avg_line_length": 31.28719711303711, "blob_id": "0a5bd34d8b2c6cba9320f60ee8cb97859d5cf08d", "content_id": "30766308254a6ed5ecc7850d8a1a42498105ff3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9333, "license_type": "no_license", "max_line_length": 160, "num_lines": 289, "path": "/src/meta_globals.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "######################################\n# # \n# Byron C. Wallace #\n# George Dietz #\n# CEBM @ Brown # \n# OpenMeta[analyst] # \n# # \n# Contains globals used #\n# throughout. # \n# # \n######################################\n\n# TODO: move functions out of here and just have this be constants w/o imports\n\nimport os\n\nfrom PyQt4 import QtCore, QtGui\nfrom PyQt4.Qt import *\n\nAPPLICATION_NAME = \"OpenMetaAnalyst\"\nORGANIZATION_NAME = \"CEBM\"\n\n# number of digits to display\nNUM_DIGITS = 3\n\n# number of digits to display in calculator\n# It is now a global here and in the data_table_view class. (However\n# here we show four digits; there it is 3. We want different\n# levels of granularity).\nCALC_NUM_DIGITS = 4\n\n# completely made up. need an actual versioning system.\nVERSION = .005 \n\nDISABLE_NETWORK_STUFF = True # disable this until we can package jags, rjags, getmc\nDEFAULT_DATASET_NAME = unicode(\"untitled_dataset\", \"utf-8\")\n\n## For now we're going to hardcode which metrics are available.\n# In the future, we may want to pull these out dynamically from \n# the R side. 
But then meta-analytic methods would have either to\n# only operate over the effects and variances or else themselves \n# know how to compute arbitrary metrics.\n\n# Binary metrics\nBINARY_TWO_ARM_METRICS = [\"OR\", \"RD\", \"RR\", \"AS\", \"YUQ\", \"YUY\"]\nBINARY_ONE_ARM_METRICS = [\"PR\", \"PLN\", \"PLO\", \"PAS\", \"PFT\"]\nBINARY_METRIC_NAMES = {\"OR\":\"Odds Ratio\",\n \"RD\":\"Risk Difference\",\n \"RR\":\"Risk Ratio\",\n \"AS\":\"Difference of arcsines transformed proportions\",\n \"YUQ\":\"Yule's Q is equal to (oi-1)/(oi+1), where oi is the odds ratio.\",\n \"YUY\":\"Yule's Y is equal to (sqrt(oi)-1)/(sqrt(oi)+1), where oi is the odds ratio.\",\n \"PR\":\"Untransformed Proportion\",\n \"PLN\":\"Natural Logarithm transformed Proportion\",\n \"PLO\":\"Logit transformed Proportion\",\n \"PAS\":\"Arcsine transformed Proportion\",\n \"PFT\":\"Freeman-Tukey Transformed Proportion\",\n }\n\n# Continuous metrics\nCONTINUOUS_TWO_ARM_METRICS = [\"MD\", \"SMD\"]\nCONTINUOUS_ONE_ARM_METRICS = [\"TX Mean\"]\nCONTINUOUS_METRIC_NAMES = {\"MD\":\"Mean Difference\",\n \"SMD\":\"Standardized Mean Difference\",\n \"TX Mean\":\"TX Mean\",\n }\n\n\n# Default metrics (for when making a new dataset)\nDEFAULT_BINARY_ONE_ARM = \"PR\"\nDEFAULT_BINARY_TWO_ARM = \"OR\"\nDEFAULT_CONTINUOUS_ONE_ARM = \"TX Mean\"\nDEFAULT_CONTINUOUS_TWO_ARM = \"SMD\"\n\n# Sometimes it's useful to know if we're dealing with a one-arm outcome,\n# in general\nONE_ARM_METRICS = BINARY_ONE_ARM_METRICS + CONTINUOUS_ONE_ARM_METRICS \nTWO_ARM_METRICS = BINARY_TWO_ARM_METRICS + CONTINUOUS_TWO_ARM_METRICS\n\n# Diagnostic metrics\nDIAGNOSTIC_METRICS = [\"Sens\", \"Spec\", \"PLR\", \"NLR\", \"DOR\"]\nDIAGNOSTIC_LOG_METRICS = [\"PLR\", \"NLR\", \"DOR\"]\nDIAGNOSTIC_METRIC_NAMES = {\"Sens\":\"Sensitivity\",\n \"Spec\":\"Specificity\",\n \"PLR\":\"Positive Likelihood Ratio\",\n \"NLR\":\"Negative Likelihood Ratio\",\n \"DOR\":\"Diagnostic Odds Ratio\",\n }\n\n# Construct dictionary of all the metric names\nALL_METRIC_NAMES = {}\nALL_METRIC_NAMES.update(BINARY_METRIC_NAMES)\nALL_METRIC_NAMES.update(CONTINUOUS_METRIC_NAMES)\nALL_METRIC_NAMES.update(DIAGNOSTIC_METRIC_NAMES)\n\n# enumeration of data types and dictionaries mapping both ways\nBINARY, CONTINUOUS, DIAGNOSTIC, OTHER = range(4)\n\n# we need two types for covariates; factor and continuous. 
we'll use the \n# above definition (enumerated as part of a general data type) for continuous\n# and just define factor here.\nFACTOR = 4\n\n# making life easier\nCOV_INTS_TO_STRS = {4:\"factor\", 1:\"continuous\"}\n\nSTR_TO_TYPE_DICT = {u\"binary\":BINARY,\n u\"continuous\":CONTINUOUS, \n u\"diagnostic\":DIAGNOSTIC,\n u\"OTHER\":OTHER\n }\n\nTYPE_TO_STR_DICT = {BINARY:u\"binary\",\n CONTINUOUS:u\"continuous\",\n DIAGNOSTIC:u\"diagnostic\",\n\t\t\t\t\tOTHER:u\"OTHER\",\n FACTOR:u\"factor\",\n }\n \n# enumeration of meta-analytic types\nVANILLA, NETWORK = range(2)\n\nEMPTY_VALS = (\"\", None) # these indicate an empty row/cell \n\nBASE_PATH = str(os.path.abspath(os.getcwd()))\n\n#def get_BASE_PATH():\n# BASE_PATH = str(os.path.abspath(os.getcwd())) # where temporary R output should go\n\n\n# this is a useful function sometimes.\nnone_to_str = lambda x: \"\" if x is None else x\n\nHELP_URL = \"http://www.cebm.brown.edu/static/oma/doc/OpenMA_help.html\"\n\n# for diagnostic data -- this dictionary maps\n# the mteric names as they appear in the UI/ure\n# used here to the names used in the model.\n# see get_diag_metrics_to_run.\nDIAG_METRIC_NAMES_D = {\"sens\":[\"Sens\"], \n \"spec\":[\"Spec\"],\n \"dor\":[\"DOR\"],\n \"lr\":[\"PLR\", \"NLR\"]\n }\n\nDIAG_FIELDS_TO_RAW_INDICES = {\"TP\":0, \"FN\":1, \"FP\":2, \"TN\":3}\n\n# list of methods with no forest plot parameters\nMETHODS_WITH_NO_FOREST_PLOT = [\"diagnostic.hsroc\", \"diagnostic.bivariate.ml\"]\n\n# this is the maximum size of a residual that we're willing to accept\n# when computing 2x2 data\nTHRESHOLD = 1e-5\n\nERROR_COLOR = QColor(\"red\")\nOK_COLOR = QColor(\"black\")\n\nDEFAULT_GROUP_NAMES = [\"Grp A\", \"Grp B\"] # old: DEFAULT_GROUP_NAMES = [\"tx A\", \"tx B\"]\n\n\ndef equal_close_enough(x,y):\n THRESHOLD = 1e-4\n if abs(x-y) < THRESHOLD:\n return True\n else:\n return False\n\n### CONFIDENCE LEVEL STUFF #####\nDEFAULT_CONF_LEVEL = 95.0 # (normal 95% CI)\n\n\n\n'''\nsome useful static methods\n'''\n\ndef seems_sane(xticks):\n num_list = xticks.split(\",\")\n if len(num_list) == 1:\n return False\n try:\n num_list = [eval(x) for x in num_list]\n except:\n return False\n return True\n \ndef check_plot_bound(bound):\n try:\n # errrm... this might cause a problem if \n # bound is 0... \n return float(bound) \n except:\n return False\n\ndef is_a_float(s):\n try:\n float(s)\n return True\n except:\n return False\n\ndef is_empty(s):\n return s is None or s == \"\"\n \ndef is_an_int(s):\n try:\n int(s)\n return True\n except:\n return False\n \n \ndef is_NaN(x):\n # there's no built-in for checking if a number is a NaN in\n # Python < 2.6. 
checking if a number is equal to itself\n # does the trick, though purportedly does not always work.\n return x != x\n\nclass CommandGenericDo(QUndoCommand):\n '''\n This is a generic undo/redo command that takes two unevaluated lambdas --\n thunks, if you will -- one for doing and one for undoing.\n '''\n def __init__(self, redo_f, undo_f, description=\"\"):\n super(CommandGenericDo, self).__init__(description)\n self.redo_f = redo_f\n self.undo_f = undo_f\n \n def redo(self):\n self.redo_f()\n \n def undo(self):\n self.undo_f()\n \n\ndef matrix_as_table(matrix, col_width=None, spacing=2):\n ''' matrix is a list of rows, col_width is the fixed column width '''\n \n # get default col width, wide as widgest col\n if col_width is None:\n max_width = 0\n for row in matrix:\n widths = [len(x) for x in row]\n max_width = max(max_width, max(widths))\n col_width = max_width\n\n matrix_formatted = []\n for row in matrix:\n formatted_row = [\"{0:{width}}\".format(x, width=col_width) for x in row]\n spacer = \" \"*spacing\n formatted_row = spacer.join(formatted_row)\n matrix_formatted.append(formatted_row)\n \n matrix_formatted = \"\\n\".join(matrix_formatted)\n return matrix_formatted\n\ndef tabulate(lists, sep=\" | \", return_col_widths=False, align=[]):\n ''' Makes a pretty table from the lists in args'''\n ''' each arg is a list '''\n ''' if return_max_col_lenths is true, the return type is a tuple of (str, col_widths) '''\n ''' align is a list the same length as lists telling how the column should be aligned ('L','R') etc '''\n \n if len(align) != len(lists):\n align = ['L',]*len(lists) \n print(\"Align is now %s: \" % align)\n \n # covert lists in args to string lists\n string_lists = []\n for arg in lists:\n str_arg = [str(x) for x in arg]\n string_lists.append(str_arg)\n \n # get max length of each element in each column\n max_lengths = []\n for arg in string_lists:\n max_len = max([len(x) for x in arg])\n max_lengths.append(max_len)\n \n data = zip(*string_lists)\n out = []\n for row in data:\n row_str = [\"{0:{align}{width}}\".format(x, width=width,align='<' if row_alignment=='L' else '>') for x,width,row_alignment in zip(row,max_lengths,align)]\n row_str = sep.join(row_str)\n out.append(row_str)\n out_str = \"\\n\".join(out)\n \n if return_col_widths:\n return (out_str, max_lengths)\n return out_str\n\n\n" }, { "alpha_fraction": 0.33448275923728943, "alphanum_fraction": 0.35517242550849915, "avg_line_length": 17.33333396911621, "blob_id": "419770d8a25ed9afae336ec1a8c22f2d33855d3f", "content_id": "4cc2a306e21d63190b7e6da31e27ab460eb49111", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 290, "license_type": "no_license", "max_line_length": 54, "num_lines": 15, "path": "/src/R/HSROC/R/REFSTD_3.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "REFSTD_3 <-\r\nfunction (rs, n.sample) \r\n{\r\n if (rs[[1]] == 1) {\r\n n_rs = sum(n.sample)\r\n }\r\n else {\r\n n.rs = rs[[1]]\r\n n_rs = numeric()\r\n for (i in 1:n.rs) {\r\n n_rs = c(n_rs, sum(n.sample[rs[[i + 1]]]))\r\n }\r\n }\r\n return(n_rs)\r\n}\r\n" }, { "alpha_fraction": 0.5702988505363464, "alphanum_fraction": 0.5755256414413452, "avg_line_length": 37.84376907348633, "blob_id": "3696131f76fc3123a85020c0fe5e543ddc0193d1", "content_id": "81615b19120afefbfa8893910981dc44f1d4f745", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 58927, "license_type": "no_license", "max_line_length": 250, "num_lines": 
1517, "path": "/src/meta_py_r.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "#############################################################################\n# #\n# Byron C. Wallace #\n# George Dietz #\n# CEBM @ Brown # \n# OpenMeta[analyst] #\n# #\n# This is a proxy module that is responsible for communicating with R. #\n# An unholy mixture of R and Python # \n# **All calls to R (equivalently, all references to the rpy2 library) #\n# are to be made via this module ** #\n# #\n#############################################################################\n\nprint(\"Entering meta_py_r for import probably\")\nimport math\nimport os\nfrom meta_globals import *\nfrom settings import *\n\nprint(\"the path: %s\" % os.getenv(\"PATH\"))\n\ntry:\n print(\"importing from rpy2\")\n # will fail if not properly configured\n # good place to debug when trying to get the mac build to work\n #from rpy2 import robjects as ro\n import rpy2.robjects as ro\n print(\"succesfully imported from rpy2\")\nexcept Exception, e:\n print e\n print(\"rpy2 import problem\")\n #pyqtRemoveInputHook()\n #pdb.set_trace()\n raise Exception(\"rpy2 not properly installed!\")\n print e\nprint(\"importing rpy2.robjects\")\nimport rpy2.robjects\nprint(\"succesfully imported rpy2.robjects\")\n\ndef execute_r_string(r_str):\n \n try:\n print(\"Executing: %s\\n\" % r_str)\n return ro.r(r_str)\n except Exception as e:\n # reset working directory in r then raise the error, hope this will address issue #244\n print(\"something bad happened in R\")\n reset_Rs_working_dir()\n raise e\n\n#################### R Library Loader ####################\nclass RlibLoader:\n def __init__(self):\n print(\"R Libary loader (RlibLoader) initialized...\")\n def load_metafor(self):\n return self._load_r_lib(\"metafor\")\n def load_openmetar(self):\n return self._load_r_lib(\"openmetar\")\n def load_igraph(self):\n return self._load_r_lib(\"igraph\")\n def load_grid(self):\n return self._load_r_lib(\"grid\")\n def load_gemtc(self):\n return self._load_r_lib(\"gemtc\")\n def load_all(self):\n self.load_metafor()\n self.load_openmetar()\n self.load_igraph()\n self.load_grid()\n self.load_gemtc()\n def _load_r_lib(self, name):\n try:\n execute_r_string(\"library(%s)\" % name)\n msg = \"%s package successfully loaded\" % name\n print(msg)\n return (True, msg)\n except:\n raise Exception(\"The %s R package is not installed.\\nPlease \\\ninstall this package and then re-start OpenMeta.\" % name)\n#################### END OF R Library Loader ####################\n\ndef RfunctionCaller(function):\n def _RfunctionCaller(*args, **kw):\n print(\"Using rpy2 interface to R to call %s\" % function.func_name)\n res = function(*args, **kw)\n return res\n return _RfunctionCaller\n\ndef get_R_libpaths():\n ''' Returns the libpaths that R looks at, sanity check to make sure it sees the right paths '''\n \n libpaths = execute_r_string(\".libPaths()\")\n print(\"R Lib paths:\")\n for i, path in enumerate(libpaths):\n print(\"%d: %s\" % (i,path))\n return list(libpaths)\n\n@RfunctionCaller\ndef reset_Rs_working_dir():\n ''' resets R's working directory to the the application base_path, not to r_tmp!'''\n print(\"resetting R working dir\")\n\n # Fix paths issue in windows\n base_path = get_base_path()\n base_path = to_posix_path(base_path)\n\n print(\"Trying to set base_path to %s\" % base_path)\n r_str = \"setwd('%s')\" % base_path\n # Executing r call with escaped backslashes\n ro.r(r_str)\n\n print(\"Set R's working directory to %s\" % 
base_path)\n\n@RfunctionCaller\ndef impute_diag_data(diag_data_dict):\n print \"computing 2x2 table via R...\"\n print diag_data_dict\n\n # rpy2 doesn't know how to handle None types.\n # we can just remove them from the dictionary.\n for param, val in diag_data_dict.items():\n if val is None:\n diag_data_dict.pop(param)\n\n dataf = ro.r['data.frame'](**diag_data_dict)\n two_by_two = execute_r_string(\"gimpute.diagnostic.data(%s)\" % (dataf.r_repr()))\n \n \n imputed_2x2 = R_parse_tools.rlist_to_pydict(two_by_two)\n print (\"Imputed 2x2: %s\" % str(imputed_2x2))\n \n return imputed_2x2\n\n## Used anymore?\n#def rescale_effect_and_ci_conf_level(data_dict):\n# print(\"Rescaling effect and CI confidence level\")\n# \n# return R_fn_with_dataframe_arg(data_dict, \"rescale.effect.and.ci.conf.level\")\n\n@RfunctionCaller\ndef R_fn_with_dataframe_arg(data_dict, R_fn_name):\n '''Calls an R function which takes a dataframe as its only argument w/\n parameters within. Returns a python dictionary. Assumes R function returns\n an R list'''\n \n for param, val in data_dict.items():\n if val is None:\n data_dict.pop(param)\n\n dataf = ro.r['data.frame'](**data_dict)\n r_string = R_fn_name + \"(\" + str(dataf.r_repr()) + \")\"\n \n print(\"executing (from R_fn_with_dataframe_arg: %s\" % r_string)\n R_result = execute_r_string(r_string)\n \n res_as_dict = R_parse_tools.recursioner(R_result)\n return res_as_dict\n\n@RfunctionCaller\ndef impute_bin_data(bin_data_dict):\n remove_value(None, bin_data_dict)\n\n dataf = ro.r['data.frame'](**bin_data_dict)\n two_by_two = execute_r_string(\"gimpute.bin.data(%s)\" % (dataf.r_repr()))\n \n res_as_dict = R_parse_tools.recursioner(two_by_two)\n \n \n \n return res_as_dict\n\n@RfunctionCaller\ndef back_calc_cont_data(group1_data, group2_data, effect_data, conf_level):\n remove_value(None, group1_data)\n remove_value(None, group2_data)\n remove_value(None, effect_data)\n \n dataf_grp1 = ro.r['data.frame'](**group1_data)\n dataf_grp2 = ro.r['data.frame'](**group2_data)\n dataf_effect = ro.r['data.frame'](**effect_data)\n \n r_res = execute_r_string(\"gimpute.cont.data(%s,%s,%s,%s)\" % (dataf_grp1.r_repr(),\n dataf_grp2.r_repr(),\n dataf_effect.r_repr(),\n str(conf_level)))\n \n res_as_dict = R_parse_tools.recursioner(r_res)\n \n return res_as_dict\n \n\ndef remove_value(toRemove, t_dict):\n ''' Removes all entries in t_dict with value toRemove'''\n for param, val in t_dict.items():\n if val == toRemove:\n t_dict.pop(param)\n\ndef _gis_NA(x):\n return str(x) == 'NA'\n\n###### R data structure tools #############\n\nclass R_parse_tools:\n ''' a set of tools to help parse data structures returned from rpy2 '''\n def __init__(self):\n pass\n \n @staticmethod\n def rlist_to_pydict(named_r_list):\n ''' parse named R list into a python dictionary.'''\n #Only parses one level, is not recursive.'''\n \n keys = named_r_list.names\n if str(keys) == \"NULL\":\n raise ValueError(\"No names found in alleged named R list\")\n \n data = R_parse_tools.R_iterable_to_pylist(named_r_list)\n d = dict(zip(keys, data)) \n \n return d\n \n @staticmethod\n def recursioner(data):\n '''\n named_r_list --> python dictionary\n not named r_list --> python list\n singleton_r_list ---> python scalar\n '''\n \n if R_parse_tools.haskeys(data): # can be converted to dictionary\n d = R_parse_tools.rlist_to_pydict(data)\n for k,v in d.items():\n d[k] = R_parse_tools.recursioner(v)\n return d\n elif R_parse_tools._isListable(data): # can be converted to list\n l = R_parse_tools.R_iterable_to_pylist(data)\n 
for i,v in enumerate(l):\n l[i] = R_parse_tools.recursioner(v)\n return l\n else: # is a scalar\n return R_parse_tools._convert_NA_to_None(data) # convert NA to None\n \n \n \n \n @staticmethod\n def R_iterable_to_pylist(r_iterable):\n ''' Converts an r_iterable (i.e. list or vector) to a python list.\n Will convert singleton elements to scalars in the list but not the list\n itself if it is singleton. '''\n \n def filter_list_element(x):\n ''' if x is a singleton list, converts x to a scalar '''\n if R_parse_tools._isListable(x) and len(x) == 1:\n return R_parse_tools._singleton_list_to_scalar(x)\n else:\n return x\n \n python_list = list(r_iterable)\n python_list = [filter_list_element(x) for x in python_list]\n return python_list\n \n @staticmethod\n def _singleton_list_to_scalar(singleton_list):\n ''' Takes in a singleton R list and returns a scalar value and converts 'NA'\n to None '''\n \n if len(singleton_list) > 1:\n raise ValueError(\"Expected a singleton list but this list has more than one entry\")\n \n # special case of a factor ve\n if type(singleton_list) == rpy2.robjects.vectors.FactorVector:\n return execute_r_string(\"as.character(%s)\" % singleton_list.r_repr())[0]\n \n scalar = singleton_list[0]\n return R_parse_tools._convert_NA_to_None(scalar)\n \n \n @staticmethod\n def _convert_NA_to_None(scalar):\n if str(scalar) == 'NA':\n return None\n else:\n return scalar\n \n @staticmethod\n def _isListable(element, exclude_strings = True):\n try:\n list(element)\n except TypeError:\n return False\n \n # don't want to treat strings as lists even though they are iterable\n if exclude_strings and type(element) == str:\n return False\n \n return True\n \n @staticmethod\n def haskeys(r_object):\n if not hasattr(r_object,'names'):\n return False\n \n return str(r_object.names) != \"NULL\"\n \n\n \n\n#### end of R data structure tools #########\n\n\n# This should be renamed as it is not doing back-calculation from effects\n@RfunctionCaller\ndef impute_cont_data(cont_data_dict, alpha):\n print \"computing continuous data via R...\"\n \n # first check that we have some data;\n # if not, there's no sense in trying to\n # impute anything\n if len(cont_data_dict.items()) == 0:\n return {\"succeeded\":False}\n \n r_str = [\"fillin.cont.1spell(\"]\n for param, val in cont_data_dict.items():\n r_str.append(\"%s=%s,\" % (param, val))\n \n r_str = \"\".join(r_str)\n\n # append alpha argument (for CI level); close function call (parens)\n r_str += \"alpha=%s)\" % alpha\n \n print \"attempting to execute: %s\" % r_str\n c_data = execute_r_string(r_str)\n \n results = R_parse_tools.recursioner(c_data)\n \n return results\n\n@RfunctionCaller\ndef impute_pre_post_cont_data(cont_data_dict, correlation, alpha):\n if len(cont_data_dict.items()) == 0:\n return {\"succeeded\":False}\n \n r_str = [\"fillin.cont.AminusB(\"]\n for param, val in cont_data_dict.items():\n r_str.append(\"%s=%s,\" % (param, val))\n \n r_str = \"\".join(r_str)\n r_str += \"correlation=%s, alpha=%s)\" % (correlation, alpha)\n print \"attempting to execute: %s\" % r_str\n c_data = execute_r_string(r_str)\n pythonized_data = R_parse_tools.recursioner(c_data)\n \n \n return pythonized_data\n\n##################### DEALING WITH CONFIDENCE LEVEL IN R #######################\n@RfunctionCaller\ndef get_mult_from_r(confidence_level):\n alpha = 1-float(confidence_level)/100.0\n r_str = \"abs(qnorm(%s/2))\" % str(alpha)\n mult = execute_r_string(r_str)\n return 
mult[0]\n################################################################################\n\n@RfunctionCaller\ndef none_to_null(x):\n if x is None:\n return ro.r['as.null']()\n return x\n\ndef get_params(method_name):\n param_list = execute_r_string(\"%s.parameters()\" % method_name)\n # note that we're assuming that the last entry of param_list, as provided\n # by the corresponding R routine, is the order to display the variables\n param_d = {}\n for name, r_obj in zip(param_list.names, param_list): \n param_d[name] = r_obj\n\n order_vars = None\n if param_d.has_key(\"var_order\"):\n order_vars = list(param_d[\"var_order\"])\n\n pretty_names_and_descriptions = get_pretty_names_and_descriptions_for_params(\n method_name, param_list)\n\n return (R_parse_tools.recursioner(param_d['parameters']),\n R_parse_tools.recursioner(param_d['defaults']),\n order_vars,\n pretty_names_and_descriptions,\n )\n\n \n@RfunctionCaller\ndef get_pretty_names_and_descriptions_for_params(method_name, param_list):\n method_list = execute_r_string(\"lsf.str('package:openmetar')\")\n pretty_names_f = \"%s.pretty.names\" % method_name\n params_d = {}\n if pretty_names_f in method_list:\n # try to match params to their pretty names and descriptions\n pretty_names_and_descriptions = execute_r_string(\"%s()\" % pretty_names_f)\n # this dictionary is assumed to be as follows:\n # params_d[param] --> {\"pretty.name\":XX, \"description\":XX}\n params_d = R_parse_tools.recursioner(pretty_names_and_descriptions)\n\n # fill in entries for parameters for which pretty names/descriptions were\n # not provided-- these are just place-holders to make processing this\n # easier\n names_index = param_list.names.index(\"parameters\") \n param_names = param_list[names_index].names # pull out the list\n for param in param_names:\n if not param in params_d.keys():\n params_d[param] = {\"pretty.name\":param, \"description\":\"None provided\"}\n \n return params_d\n\n\n@RfunctionCaller\ndef get_available_methods(for_data_type=None, data_obj_name=None, metric=None):\n '''\n Returns a list of methods available in OpenMeta for the particular data_type\n (if one is given). Excludes \"*.parameters\" methods\n '''\n method_list = execute_r_string(\"lsf.str('package:openmetar')\")\n\n # the following constitute 'special' or 'reserved' function\n # names that are used by meta-analyst to parse out available\n # methods and their parameters. we exclude these from the list\n # of available meta-analytic routines.\n # \n # by convention, the methods available for a data type (e.g., binary)\n # start with the name of the data type. 
furthermore, the parameters\n # for those methods are returned by a method with a name\n # ending in \".parameters\"\n special_endings = [\".parameters\", \".is.feasible\", \".overall\",\n \".regression\", \"transform.f\", \".pretty.names\",\".value.info\",\n \"is.feasible.for.funnel\"]\n is_special = lambda f: any([f.endswith(ending) for ending in special_endings])\n all_methods = [method for method in method_list if not is_special(method)]\n if for_data_type is not None:\n all_methods = [method for method in all_methods if method.startswith(for_data_type)]\n\n # now, if a data object handle was provided, check which methods are feasible\n if data_obj_name is not None:\n # we will return a dictionary mapping pretty\n # names (optionally) to method names; if no pretty name exists,\n # then we just map the method name to itself.\n # note that if more than one method exists with the same pretty name\n # it will be overwritten!\n feasible_methods = {}\n for method in all_methods:\n is_feasible = True\n # we check if the author of this method has provided an is.feasible\n # routine; if so, we will call it. otherwise, we assume that we can\n # invoke the corresponding routine (i.e., we assume it's feasible)\n is_feas_f = \"%s.is.feasible\" % method\n if is_feas_f in method_list:\n # we need to pass along the metric along with the data \n # object to assess if a given method is feasible (e.g,.\n # PETO for binary data only makes sense for 'OR')\n is_feasible = execute_r_string(\"%s(%s, '%s')\" % (is_feas_f, data_obj_name, metric))[0]\n \n if is_feasible:\n # do we have a pretty name?\n pretty_names_f = \"%s.pretty.names\" % method\n if pretty_names_f in method_list:\n pretty_name = execute_r_string(\"%s()$pretty.name\" % pretty_names_f)[0]\n feasible_methods[pretty_name] = method\n else:\n # no? then just map to the function name\n feasible_methods[method] = method\n return feasible_methods\n\n@RfunctionCaller\ndef get_method_description(method_name):\n pretty_names_f = \"%s.pretty.names\" % method_name\n method_list = execute_r_string(\"lsf.str('package:openmetar')\")\n description = \"None provided.\"\n if pretty_names_f in method_list:\n try:\n description = execute_r_string(\"%s()$description\" % pretty_names_f)[0]\n except:\n pass\n return description\n\n\n#def ma_dataset_to_binary_robj(table_model, var_name):\n# pass\n\n\n@RfunctionCaller\ndef draw_network(edge_list, unconnected_vertices, network_path = '\"./r_tmp/network.png\"'):\n '''\n This draws the parametric network specified by edge_list.\n The latter is assumed to be in form:\n [\"tx a\", \"tx b\", \"tx b\", \"tx c\" .... \"tx z']\n Where two adjacent entires in the list are connected.\n Note that we (lazily) make all calls to R here rather than\n implementing a method on the R side that takes a graph/\n edge list. 
We may want to change this eventually.\n '''\n if len(edge_list) > 0:\n edge_str = \", \".join([\" '%s' \" % x for x in edge_list])\n execute_r_string(\"el <- matrix(c(%s), nc=2, byrow=TRUE)\" % edge_str)\n execute_r_string(\"g <- graph.edgelist(el, directed=FALSE)\")\n else:\n execute_r_string(\"g <- graph.empty()\") \n \n if len(unconnected_vertices) > 0:\n print unconnected_vertices\n vertices_str = \", \".join([\" '%s' \" % x for x in unconnected_vertices])\n execute_r_string(\"g <- add.vertices(g, %s, name=c(%s))\" % (len(unconnected_vertices), vertices_str))\n execute_r_string(\"png(%s)\" % network_path)\n execute_r_string(\"plot(g, vertex.label=V(g)$name, layout=layout.circle, vertex.size=25, asp=.3, margin=-.05)\")\n execute_r_string(\"dev.off()\")\n return \"r_tmp/network.png\"\n \n@RfunctionCaller\ndef ma_dataset_to_simple_continuous_robj(table_model, var_name=\"tmp_obj\",\n covs_to_include=None, studies=None):\n r_str = None\n \n if studies is None:\n # grab all studies. note: the list is pulled out in reverse order from the \n # model, so we, er, reverse it.\n studies = table_model.get_studies()\n # the study_ids preserve the ordering\n study_ids = [study.id for study in studies]\n study_names = \", \".join([\"\\\"\" + study.name + \"\\\"\" for study in studies])\n \n # issue #139 -- also grab the years\n none_to_str = lambda n : str(n) if n is not None else \"\" # this will produce NA ints\n study_years = \", \".join([\"as.integer(%s)\" % none_to_str(study.year) for study in studies])\n\n ests, SEs = table_model.get_cur_ests_and_SEs(only_these_studies=study_ids) \n ests_str = \", \".join(_to_strs(ests))\n SEs_str = \", \".join(_to_strs(SEs))\n \n cov_str = list_of_cov_value_objects_str(table_model.dataset,\n study_ids,\n cov_list=covs_to_include,)\n\n\n # first try and construct an object with raw data -- note that if\n # we're using a one-armed metric for cont. data, we just use y/SE\n if (not table_model.current_effect in ONE_ARM_METRICS) and \\\n table_model.included_studies_have_raw_data():\n print \"we have raw data... parsing, parsing, parsing\"\n \n raw_data = table_model.get_cur_raw_data(only_these_studies=study_ids)\n Ns1_str = _get_str(raw_data, 0)\n means1_str = _get_str(raw_data, 1)\n SDs1_str = _get_str(raw_data, 2)\n Ns2_str = _get_str(raw_data, 3)\n means2_str = _get_str(raw_data, 4)\n SDs2_str = _get_str(raw_data, 5)\n\n r_str = \"%s <- new('ContinuousData', \\\n N1=c(%s), mean1=c(%s), sd1=c(%s), \\\n N2=c(%s), mean2=c(%s), sd2=c(%s), \\\n y=c(%s), SE=c(%s), study.names=c(%s),\\\n years=c(%s), covariates=%s)\" \\\n % (var_name, Ns1_str, means1_str, SDs1_str,\n Ns2_str, means2_str, SDs2_str,\n ests_str, SEs_str, study_names, study_years, cov_str)\n \n else:\n print \"no raw data (or one-arm)... using effects\"\n r_str = \"%s <- new('ContinuousData', \\\n y=c(%s), SE=c(%s), study.names=c(%s),\\\n years=c(%s), covariates=%s)\" \\\n % (var_name, ests_str, SEs_str, \\\n study_names, study_years, cov_str)\n \n # character encodings for R\n r_str = _sanitize_for_R(r_str)\n print \"executing: %s\" % r_str\n execute_r_string(r_str)\n print \"ok.\"\n return r_str\n \n \ndef _get_str(M, col_index, reverse=True):\n x = _get_col(M, col_index)\n if reverse:\n x.reverse()\n return \", \".join(_to_strs(x))\n \n\n@RfunctionCaller\ndef ma_dataset_to_simple_binary_robj(table_model, var_name=\"tmp_obj\", \n include_raw_data=True, covs_to_include=None,\n studies=None):\n '''\n This converts a DatasetModel to an OpenMetaData (OMData) R object. 
We use type DatasetModel\n rather than a DataSet model directly to access the current variables. Furthermore, this allows\n us to check which studies (if any) were excluded by the user.\n \n By 'simple' we mean that this method returns a single outcome single follow-up (defined as the\n the currently selected, as indicated by the model object) data object.\n\n @TODO\n - implement methods for more advanced conversions, i.e., for multiple outcome\n datasets (althought this will be implemented in some other method)\n '''\n r_str = None\n \n if studies is None:\n # grab the study names. note: the list is pulled out in reverse order from the \n # model, so we, er, reverse it.\n studies = table_model.get_studies(only_if_included=True)\n\n study_ids = [study.id for study in studies]\n\n # issue #139 -- also grab the years\n none_to_str = lambda n : str(n) if n is not None else \"\" # this will produce NA ints\n study_years = \", \".join([\"as.integer(%s)\" % none_to_str(study.year) for study in studies])\n study_names = \", \".join([\"\\\"\" + study.name + \"\\\"\" for study in studies])\n \n ests, SEs = table_model.get_cur_ests_and_SEs(only_if_included=True, only_these_studies=study_ids)\n ests_str = \", \".join(_to_strs(ests))\n SEs_str = \", \".join(_to_strs(SEs))\n\n \n # generate the covariate string\n cov_str = list_of_cov_value_objects_str(table_model.dataset, study_ids,\n cov_list=covs_to_include)\n \n\n # first try and construct an object with raw data\n if include_raw_data and table_model.included_studies_have_raw_data():\n print \"ok; raw data has been entered for all included studies\"\n \n # now figure out the raw data\n raw_data = table_model.get_cur_raw_data(only_these_studies=study_ids)\n \n g1_events = _get_col(raw_data, 0)\n \n g1O1_str = \", \".join(_to_strs(g1_events))\n g1_totals = _get_col(raw_data, 1)\n \n g1O2 = [(total_i-event_i) for total_i, event_i in zip(g1_totals, g1_events)]\n g1O2_str = \", \".join(_to_strs(g1O2))\n \n # now, for group 2; we only set up the string\n # for group two if we have a two-arm metric\n g2O1_str, g2O2_str = \"0\", \"0\" # the 0s are just to satisfy R; not used\n if table_model.current_effect in TWO_ARM_METRICS: \n g2_events = _get_col(raw_data, 2)\n g2O1_str = \", \".join(_to_strs(g2_events))\n\n g2_totals = _get_col(raw_data, 3)\n g2O2 = [(total_i-event_i) for total_i, event_i in zip(g2_totals, g2_events)]\n g2O2_str = \", \".join(_to_strs(g2O2))\n \n # actually creating a new object on the R side seems the path of least resistance here.\n # the alternative would be to try and create a representation of the R object on the \n # python side, but this would require more work and I'm not sure what the benefits\n # would be\n r_str = \"%s <- new('BinaryData', g1O1=c(%s), g1O2=c(%s), g2O1=c(%s), g2O2=c(%s), \\\n y=c(%s), SE=c(%s), study.names=c(%s), years=c(%s), covariates=%s)\" % \\\n (var_name, g1O1_str, g1O2_str, g2O1_str, g2O2_str, \\\n ests_str, SEs_str, study_names, study_years, cov_str)\n\n elif table_model.included_studies_have_point_estimates():\n print \"not sufficient raw data, but studies have point estimates...\"\n\n r_str = \"%s <- new('BinaryData', y=c(%s), SE=c(%s), study.names=c(%s), years=c(%s), covariates=%s)\" \\\n % (var_name, ests_str, SEs_str, study_names, study_years, cov_str)\n \n \n else:\n print \"there is neither sufficient raw data nor entered effects/CIs. 
I cannot run an analysis.\"\n # @TODO complain to the user here\n \n\n ### Relevant for Issue #73\n # ok, it seems R uses latin-1 for its unicode encodings,\n # whereas QT uses UTF8. this can cause situations where\n # rpy2 throws up on this call due to it not being able\n # to parse a character; so we sanitize. This isn't great,\n # because sometimes characters get garbled...\n r_str = _sanitize_for_R(r_str)\n print \"executing: %s\" % r_str\n execute_r_string(r_str)\n print \"ok.\"\n return r_str\n\ndef ma_dataset_to_simple_network(table_model,\n var_name=\"tmp_obj\",\n studies=None,\n data_type=None,\n outcome=None,\n follow_up=None,\n network_path='./r_tmp/network.png'):\n ''' This converts a DatasetModel to an mtc.network R object as described\n in the getmc documentation for mtc.network'''\n \n if data_type not in [BINARY, CONTINUOUS]:\n raise ValueError(\"Given data type: '%s' is unknown.\" % str(data_type))\n \n if studies is None:\n # we will exclude studies later on if they do not have full raw_data\n studies = table_model.get_studies(only_if_included=False)\n \n #### Makes sure each group has at least one study with full raw data ####\n group_names = table_model.dataset.get_group_names_for_outcome_fu(outcome, follow_up)\n groups_to_include = []\n for group in group_names:\n for study in studies:\n ma_unit = study.outcomes_to_follow_ups[outcome][follow_up]\n raw_data = ma_unit.get_raw_data_for_group(group)\n if not _data_blank_or_none(*raw_data):\n groups_to_include.append(group)\n break\n print(\"groups to include: %s\" % str(groups_to_include))\n \n \n ############ Make 'treatments' data frame in R ###################\n \n # different id scheme in future? instead of just numbers?\n ids, descriptions = list(range(len(groups_to_include))), groups_to_include\n treatments = {'id': [x.replace(' ','_') for x in descriptions], #ids, \"\"\n 'description': descriptions}\n treatments_table_str = _make_table_string_from_dict(treatments)\n treatments_r_str = \"treatments <- read.table(textConnection('%s'), header=TRUE)\" % treatments_table_str\n #execute_r_string(treatments_r_str)\n execute_r_string(treatments_r_str)\n \n # Make 'data' data_frame in R\n if data_type == BINARY:\n data = {'study':[], 'treatment':[], 'responders':[], 'sampleSize':[]}\n elif data_type == CONTINUOUS:\n data = {'study':[], 'treatment':[], 'mean':[], 'std.dev':[], 'sampleSize':[]}\n\n for study in studies:\n #ma_unit = table_model.get_current_ma_unit_for_study(table_model.dataset.studies.index(study))\n #ma_unit = table_model.get_ma_unit(study=study, outcome=outcome, follow_up=follow_up):\n ma_unit = study.outcomes_to_follow_ups[outcome][follow_up]\n \n for treatment_id, group_name in zip(treatments['id'], treatments['description']):\n raw_data = ma_unit.get_raw_data_for_group(group_name)\n if data_type == BINARY:\n responders, sampleSize = raw_data\n data['responders'].append(responders)\n data['sampleSize'].append(sampleSize)\n elif data_type == CONTINUOUS:\n sampleSize, mean, std_dev = raw_data\n data['mean'].append(mean)\n data['std_dev'].append(std_dev)\n data['sampleSize'].append(sampleSize)\n if _data_blank_or_none(*raw_data): # make sure raw data is full\n continue\n data['study'].append(study.id)\n data['treatment'].append(treatment_id)\n data_table_str = _make_table_string_from_dict(data)\n data_table_r_str = \"data <- read.table(textConnection('%s'), header=TRUE)\" % data_table_str\n execute_r_string(data_table_r_str)\n \n ########## make the actual network ##########\n make_network_r_str = \"network 
<- mtc.network(data, description=\\\"MEWANTFOOD\\\", treatments=treatments)\"\n execute_r_string(make_network_r_str)\n \n # plot the network and return path to the image\n execute_r_string(\"png('%s')\" % network_path)\n execute_r_string(\"plot(network)\")\n execute_r_string(\"dev.off()\")\n \n return network_path\n \ndef _data_blank_or_none(*args):\n ''' Returns True if there is a blank or none value in args,\n Returns False otherwise'''\n \n if args is None:\n return True\n \n for x in args:\n if x in EMPTY_VALS:\n return True\n return False\n\n \ndef _make_table_string_from_dict(table_dict):\n '''Makes a string from dictionary d with the keys of d serving as the\n column headers'''\n \n keys, values = table_dict.keys(), table_dict.values()\n if len(keys)==0:\n raise ValueError(\"Dictionary must have at least one key\")\n \n #import pdb; pdb.set_trace()\n\n headers = [unicode(key) for key in keys]\n header_str = u' '.join(headers)\n table_str = header_str + \"\\n\"\n \n table_row_data = zip(*values)\n \n row_strings = []\n def process_datum(x):\n # quote strings\n if type(x) in [str, unicode]:\n return '\"' + str(x) + '\"'\n else:\n return str(x)\n row_data_to_row_str = lambda row_data: u\" \".join([process_datum(datum) for datum in row_data])\n\n \n \n for row_data in table_row_data:\n row_str = row_data_to_row_str(row_data)\n row_strings.append(row_str)\n table_data_str = u\"\\n\".join(row_strings)\n \n table_str += table_data_str\n \n return table_str\n\n\ndef _sanitize_for_R(a_str):\n # may want to do something fancier in the future...\n return a_str.encode('latin-1', 'ignore')\n \n \n # Mysterious fix for issue #73. For some reason, R doesn't throw up anymore\n # when non-latin characters are given. Maybe this was fixed in R at some\n # point by a 3rd party.\n #return a_str\n\n\n@RfunctionCaller\ndef ma_dataset_to_simple_diagnostic_robj(table_model, var_name=\"tmp_obj\",\n metric=\"Sens\", covs_to_include=None,\n effects_on_disp_scale=False, \n studies=None):\n '''\n This converts a DatasetModel to an OpenMetaData (OMData) R object. We use type DatasetModel\n rather than a DataSet model directly to access the current variables. Furthermore, this allows\n us to check which studies (if any) were excluded by the user.\n\n\n '''\n r_str = None\n \n # grab the study names. 
note: the list is pulled out in reverse order from the \n # model, so we, er, reverse it.\n if studies is None:\n studies = table_model.get_studies(only_if_included=True)\n study_ids = [study.id for study in studies]\n\n study_names = \", \".join([\"\\\"\" + study.name + \"\\\"\" for study in studies])\n # issue #139 -- also grab the years\n study_years = \", \".join([\"as.integer(%s)\" % study.year for study in studies])\n\n y_ests, y_SEs = table_model.get_cur_ests_and_SEs(only_if_included=True, effect=metric)\n\n y_ests_str = \", \".join(_to_strs(y_ests))\n y_SEs_str = \", \".join(_to_strs(y_SEs))\n\n # generate the covariate string\n cov_str = list_of_cov_value_objects_str(table_model.dataset,\n study_ids,\n cov_list=covs_to_include)\n\n\n # first try and construct an object with raw data\n if table_model.included_studies_have_raw_data():\n print \"ok; raw data has been entered for all included studies\"\n \n # grab the raw data; the order is \n # tp, fn, fp, tn\n raw_data = table_model.get_cur_raw_data()\n\n ### assembling TP, FP, TN and FN strings ...\n tps_str = \", \".join(_to_strs(_get_col(raw_data, 0)))\n fns_str = \", \".join(_to_strs(_get_col(raw_data, 1)))\n fps_str = \", \".join(_to_strs(_get_col(raw_data, 2)))\n tns_str = \", \".join(_to_strs(_get_col(raw_data, 3)))\n \n # actually creating a new object on the R side seems the path of least resistance here.\n # the alternative would be to try and create a representation of the R object on the \n # python side, but this would require more work and I'm not sure what the benefits\n # would be\n r_str = \"%s <- new('DiagnosticData', TP=c(%s), FN=c(%s), TN=c(%s), FP=c(%s), \\\n y=c(%s), SE=c(%s), study.names=c(%s), years=c(%s), covariates=%s)\" % \\\n (var_name, tps_str, fns_str, tns_str, fps_str, \\\n y_ests_str, y_SEs_str, study_names, study_years, cov_str)\n \n elif table_model.included_studies_have_point_estimates(effect=metric):\n print \"not sufficient raw data, but studies have point estimates...\"\n\n r_str = \"%s <- new('DiagnosticData', y=c(%s), SE=c(%s), study.names=c(%s), \\\n years=c(%s), covariates=%s)\" \\\n % (var_name, y_ests_str, y_SEs_str, study_names, study_years, cov_str)\n \n else:\n print \"there is neither sufficient raw data nor entered effects/CIs. 
I cannot run an analysis.\"\n # @TODO complain to the user here\n \n # character (unicode) encodings for R\n r_str = _sanitize_for_R(r_str)\n execute_r_string(r_str)\n print \"ok.\"\n return r_str\n\n\ndef cov_to_str(cov, study_ids, dataset, \\\n named_list=True, return_cov_vals=False):\n '''\n The string is constructed so that the covariate\n values are in the same order as the 'study_names'\n list.\n '''\n cov_str = None\n if named_list:\n cov_str = \"%s=c(\" % cov.name\n else:\n cov_str = \"c(\"\n\n cov_value_d = dataset.get_values_for_cov(cov.name, ids_for_keys=True)\n \n # get the study ids in the same order as the names\n cov_values = []\n \n for study_id in study_ids:\n if cov.data_type == CONTINUOUS:\n if cov_value_d.has_key(study_id):\n cov_values.append(\"%s\" % cov_value_d[study_id])\n else:\n cov_values.append(\"NA\")\n else:\n if cov_value_d.has_key(study_id):\n # factor; note the string.\n cov_values.append(\"'%s'\" % unicode(str(cov_value_d[study_id]).encode('latin1'), 'latin1'))\n else:\n cov_values.append(\"NA\")\n cov_str += \",\".join(cov_values) + \")\"\n \n if return_cov_vals:\n return (cov_str, cov_values)\n return cov_str\n \n\n@RfunctionCaller\ndef run_continuous_ma(function_name, params, res_name = \"result\", cont_data_name=\"tmp_obj\"):\n params_df = ro.r['data.frame'](**params)\n r_str = \"%s<-%s(%s, %s)\" % (res_name, function_name, cont_data_name, params_df.r_repr())\n print \"\\n\\n(run_continuous_ma): executing:\\n %s\\n\" % r_str\n execute_r_string(r_str)\n result = execute_r_string(\"%s\" % res_name)\n return parse_out_results(result)\n\n\n@RfunctionCaller\ndef run_binary_ma(function_name, params, res_name=\"result\", bin_data_name=\"tmp_obj\"):\n params_df = ro.r['data.frame'](**params)\n r_str = \"%s<-%s(%s, %s)\" % (res_name, function_name, bin_data_name,\\\n params_df.r_repr())\n print \"\\n\\n(run_binary_ma): executing:\\n %s\\n\" % r_str\n execute_r_string(r_str)\n result = execute_r_string(\"%s\" % res_name)\n return parse_out_results(result)\n \ndef _to_R_param_str(param):\n ''' \n Encodes Python parameters for consumption by R. Strings are single quoted,\n booleans cast to all-caps.\n '''\n if isinstance(param, str) or isinstance(param, unicode):\n return \"'%s'\"% param\n elif isinstance(param, bool):\n if param:\n return \"TRUE\"\n return \"FALSE\"\n return param\n \ndef _to_R_params(params):\n '''\n Given a Python dictionary of method arguments, this returns a string\n that represents a named list in R. 
\n '''\n params_str = []\n for param in params.keys():\n params_str.append(\"'%s'=%s\" % (param, _to_R_param_str(params[param])))\n \n params_str = \"list(\"+ \",\".join(params_str) + \")\"\n return params_str\n\n\n@RfunctionCaller\ndef run_diagnostic_multi(function_names, list_of_params, res_name=\"result\", diag_data_name=\"tmp_obj\"):\n r_params_str = \"list(%s)\" % \",\".join([_to_R_params(p) for p in list_of_params])\n \n execute_r_string(\"list.of.params <- %s\" % r_params_str)\n execute_r_string(\"f.names <- c(%s)\" % \",\".join([\"'%s'\" % f_name for f_name in function_names]))\n result = execute_r_string(\"multiple.diagnostic(f.names, list.of.params, %s)\" % diag_data_name)\n #execute_r_string(\"list.of.params <- %s\" % r_params_str)\n #execute_r_string(\"f.names <- c(%s)\" % \",\".join([\"'%s'\" % f_name for f_name in function_names]))\n #result = execute_r_string(\"multiple.diagnostic(f.names, list.of.params, %s)\" % diag_data_name)\n\n print(\"Got here is run diagnostic multi w/o error\")\n return parse_out_results(result)\n\n# HELPS WITH DEBUGGING\n#def r_statement(statement):\n# print(\"About to execute: %s\" % statement)\n# ro.r(statement)\n\n\n@RfunctionCaller\ndef run_diagnostic_ma(function_name, params, res_name=\"result\", diag_data_name=\"tmp_obj\"):\n params_str = _to_R_params(params)\n\n r_str = \"%s<-%s(%s, %s)\" % \\\n (res_name, function_name, diag_data_name, params_str) \n \n print \"\\n\\n(run_diagnostic_ma): executing:\\n %s\\n\" % r_str\n execute_r_string(r_str)\n result = execute_r_string(\"%s\" % res_name)\n return parse_out_results(result)\n\n\n@RfunctionCaller\ndef load_vars_for_plot(params_path, return_params_dict=False):\n ''' \n loads the three necessary (for plot generation) variables\n into R. we assume a naming convention in which params_path\n is the base, data is stored in *.data, params in *.params\n and result in *.res.\n '''\n for var in (\"data\", \"params\", \"res\"):\n cur_path = \"%s.%s\" % (params_path, var)\n if os.path.exists(cur_path):\n load_in_R(cur_path)\n print \"loaded %s\" % cur_path\n else:\n print \"whoops -- couldn't load %s\" % cur_path\n return False\n\n if return_params_dict:\n robj = execute_r_string(\"params\")\n params_dict = R_parse_tools.recursioner(robj)\n return params_dict\n return True\n\n\n@RfunctionCaller\ndef write_out_plot_data(params_out_path, plot_data_name=\"plot.data\"):\n execute_r_string(\"save.plot.data(%s, '%s')\" % (plot_data_name, params_out_path))\n\n\n@RfunctionCaller\ndef load_in_R(fpath):\n ''' loads what is presumed to be .Rdata into the R environment '''\n execute_r_string(\"load('%s')\" % fpath)\n\n\n@RfunctionCaller\ndef update_plot_params(plot_params, plot_params_name=\"params\",\n write_them_out=False, outpath=None):\n # first cast the params to an R data frame to make it\n # R-palatable\n params_df = ro.r['data.frame'](**plot_params)\n execute_r_string(\"tmp.params <- %s\" % params_df.r_repr())\n \n for param_name in plot_params:\n execute_r_string(\"%s$%s <- tmp.params$%s\" % \\\n (plot_params_name, param_name, param_name))\n\n if write_them_out:\n execute_r_string(\"save(tmp.params, file='%s')\" % outpath)\n\n\n@RfunctionCaller\ndef regenerate_plot_data(om_data_name=\"om.data\", res_name=\"res\", \n plot_params_name=\"params\", plot_data_name=\"plot.data\"):\n \n ####\n # this is crude, but works for now, and easier than making\n # the results_window keep track of why type of data it's\n # displaying. 
may need to re-think this in any case for the\n    # general case of plots (what 'type' is a mixed analysis, e.g.?)\n    ####\n    data_type = str(execute_r_string(\"class(%s)\" % om_data_name))\n\n    if \"BinaryData\" in data_type:\n        execute_r_string(\"plot.data<-create.plot.data.binary(%s, %s, %s)\" % \\\n                            (om_data_name, plot_params_name, res_name))\n    elif \"ContinuousData\" in data_type:\n        execute_r_string(\"plot.data<-create.plot.data.continuous(%s, %s, %s)\" % \\\n                            (om_data_name, plot_params_name, res_name))\n    else:\n        execute_r_string(\"plot.data<-create.plot.data.diagnostic(%s, %s, %s)\" % \\\n                            (om_data_name, plot_params_name, res_name))\n\n@RfunctionCaller\ndef generate_reg_plot(file_path, params_name=\"plot.data\"): \n    execute_r_string(\"meta.regression.plot(%s, '%s')\" % (params_name, file_path))\n\n\n@RfunctionCaller\ndef generate_forest_plot(file_path, side_by_side=False, params_name=\"plot.data\"):\n    if side_by_side:\n        print \"generating a side-by-side forest plot...\"\n        execute_r_string(\"two.forest.plots(%s, '%s')\" % (params_name, file_path))\n    else:\n        print(\"generating a forest plot....\")\n        execute_r_string(\"forest.plot(%s, '%s')\" % (params_name, file_path))\n\ndef parse_out_results(result):\n    # parse out text field(s). note that \"plot names\" is 'reserved', i.e., it's\n    # a special field which is assumed to contain the plot variable names\n    # in R (for graphics manipulation).\n    text_d = {}\n    image_var_name_d, image_params_paths_d, image_path_d = {}, {}, {}\n    image_order = None\n    \n    # Turn result into a nice dictionary\n    result = dict(zip(list(result.names), list(result)))\n    \n\n    for text_n, text in result.items():\n        # some special cases, notably the plot names and the path for a forest\n        # plot. TODO in the case of diagnostic data, we're probably going to \n        # need to parse out multiple forest plot param objects...\n        print text_n\n        print \"\\n--------\\n\"\n        if text_n == \"images\":\n            image_path_d = R_parse_tools.recursioner(text)\n        elif text_n == \"image_order\":\n            image_order = list(text)\n        elif text_n == \"plot_names\":\n            if str(text) == \"NULL\":\n                image_var_name_d = {}\n            else:\n                image_var_name_d = R_parse_tools.recursioner(text)\n        elif text_n == \"plot_params_paths\":\n            if str(text) == \"NULL\":\n                image_params_paths_d = {}\n            else:\n                image_params_paths_d = R_parse_tools.recursioner(text)\n        elif text_n == \"References\":\n            references_list = list(text)\n            references_list.append('metafor: Viechtbauer, Wolfgang. \"Conducting meta-analyses in R with the metafor package.\" Journal of Statistical Software 36 (2010).')\n            references_list.append('OpenMetaAnalyst: Wallace, Byron C., Issa J. Dahabreh, Thomas A. Trikalinos, Joseph Lau, Paul Trow, and Christopher H. Schmid. \"Closing the Gap between Methodologists and End-Users: R as a Computational Back-End.\" Journal of Statistical Software 49 (2012): 5.')\n            ref_set = set(references_list) # removes duplicates\n            \n            \n            references_str = \"\"\n            for i, ref in enumerate(ref_set):\n                references_str += str(i+1) + \". 
\" + ref + \"\\n\"\n \n text_d[text_n] = references_str\n elif text_n == \"weights\":\n text_d[text_n] = make_weights_str(result)\n elif text_n in [\"res\",\"res.info\", \"input_data\",\"input_params\"]: # ignore the special output for OpenMEE (may want to have this in the future for OpenMeta as well)\n pass\n elif \"gui.ignore\" in text_n:\n pass\n else:\n if type(text)==rpy2.robjects.vectors.StrVector:\n text_d[text_n] = text[0]\n else:\n text_d[text_n]=str(text)\n\n to_return = {\"images\":image_path_d,\n \"image_var_names\":image_var_name_d,\n \"texts\":text_d,\n \"image_params_paths\":image_params_paths_d,\n \"image_order\":image_order}\n \n return to_return\n\ndef make_weights_str(results):\n ''' Make a string representing the weights due to each study in the meta analysis '''\n \n # This function assumes that 'weights' and 'input_data' are actually in the results\n if not (\"weights\" in results and \"input_data\" in results and \"input_params\" in results):\n print(\"Uh oh\")\n raise Exception(\"make_weights_str() requires 'weights','input_data', and 'input_params' in the results\")\n \n digits = results[\"input_params\"].rx2(\"digits\")[0]\n digits = int(round(digits))\n weights = list(results[\"weights\"])\n weights = [\"{0:.{digits}f}%\".format(x, digits=digits) for x in weights]\n study_names = list(results[\"input_data\"].do_slot(\"study.names\"))\n \n table,widths = tabulate([study_names, weights],\n sep=\": \", return_col_widths=True,\n align=['L','R'])\n header = \"{0:<{widths[0]}} {1:<{widths[1]}}\".format(\"study names\", \"weights\", widths=widths)\n table = \"\\n\".join([header, table]) + \"\\n\"\n return table\n\n\n# Is this function obsolete?\n@RfunctionCaller\ndef run_binary_fixed_meta_regression(selected_cov, bin_data_name=\"tmp_obj\",\n res_name=\"result\", conf_level=None):\n \n if conf_level is None:\n raise ValueError(\"Confidence level must be specified\")\n \n method_str = \"FE\" \n # equiavlent to params <- list(conf.level=95, digits=3)\n params = {\"conf.level\": conf_level,\n \"digits\": 3,\n \"method\": method_str}\n params_df = ro.r['data.frame'](**params)\n r_str = \"%s<-binary.fixed.meta.regression(%s, %s, %s)\" % \\\n (res_name, bin_data_name, params_df.r_repr(), \"'\"+ selected_cov + \"'\")\n print \"\\n\\n(run_binary_ma): executing:\\n %s\\n\" % r_str\n execute_r_string(r_str)\n result = execute_r_string(\"%s\" % res_name)\n return parse_out_results(result)\n\n\ndef _gen_cov_vals_obj_str(cov, study_ids, dataset): \n values_str, cov_vals = cov_to_str(cov, study_ids, dataset, named_list=False,\n return_cov_vals=True)\n ref_var = cov_vals[0].replace(\"'\", \"\") # arbitrary\n\n ## setting the reference variable to the first entry\n # for now -- this only matters for factors, obviously\n\n r_str = \"new('CovariateValues', cov.name='%s', cov.vals=%s, \\\n cov.type='%s', ref.var='%s')\" % \\\n (cov.name, values_str, TYPE_TO_STR_DICT[cov.data_type], ref_var)\n return r_str\n\n\ndef list_of_cov_value_objects_str(dataset, study_ids, cov_list=None):\n ''' makes r_string of covariate objects with their values '''\n \n r_cov_str = []\n if cov_list is None:\n # then use all covariates that belong to the dataset\n cov_list = dataset.covariates\n for cov in cov_list:\n r_cov_str.append(_gen_cov_vals_obj_str(cov, study_ids, dataset))\n r_cov_str = \"list(\" + \",\".join(r_cov_str) + \")\"\n\n return r_cov_str\n\n\n@RfunctionCaller\ndef run_meta_regression(dataset, study_names, cov_list, metric_name,\n data_name=\"tmp_obj\", results_name=\"results_obj\",\n 
fixed_effects=False, conf_level=None): \n    \n    if conf_level is None:\n        raise ValueError(\"Confidence level must be specified\")\n    \n    method_str = \"FE\" if fixed_effects else \"DL\" \n\n    # @TODO conf.level, digits should be user-specified\n    params = {\"conf.level\": conf_level,\n              \"digits\": 3,\n              \"method\": method_str,\n              \"rm.method\": \"ML\",\n              \"measure\": metric_name}\n    params_df = ro.r['data.frame'](**params)\n\n    # create a list of covariate objects on the R side\n    r_str = \"%s<- meta.regression(%s, %s)\" % \\\n            (results_name, data_name, str(params_df.r_repr()))\n\n\n    print \"\\n\\n(run_meta_regression): executing:\\n %s\\n\" % r_str\n\n    ### TODO -- this is hacky\n\n    execute_r_string(r_str)\n    result = execute_r_string(\"%s\" % results_name)\n\n    if \"try-error\" in str(result):\n        # uh-oh, there was an error (but the weird\n        # RRunTimeError alluded to above; this is a \n        # legit error returned from an R routine)\n        return str([msg for msg in result][0])\n    \n\n    parsed_results = parse_out_results(result)\n\n    return parsed_results\n\n\n@RfunctionCaller\ndef run_meta_method_diag(meta_function_name, function_names, list_of_params,\n                         res_name=\"result\", diag_data_name=\"tmp_obj\"):\n    # list of parameter objects\n    r_params_str = \"list(%s)\" % \",\".join([_to_R_params(p) for p in list_of_params])\n    r_str = \"list.of.params <- %s\" % r_params_str\n    print(r_str)\n    execute_r_string(r_str)\n    # list of function names\n    r_str = \"f.names <- c(%s)\" % \",\".join([\"'%s'\" % f_name for f_name in function_names])\n    print(r_str)\n    execute_r_string(r_str)\n\n    multi_meta_function_name = \\\n        {\"loo.ma.diagnostic\":\"multiple.loo.diagnostic\",\n         \"subgroup.ma.diagnostic\":\"multiple.subgroup.diagnostic\",\n         \"cum.ma.diagnostic\":\"multiple.cum.ma.diagnostic\"}[meta_function_name]\n\n    r_str = \"%s(f.names, list.of.params, %s)\" % (multi_meta_function_name, diag_data_name)\n    print(r_str)\n    result = execute_r_string(r_str)\n    \n    return parse_out_results(result)\n    \n\n@RfunctionCaller\ndef run_meta_method(meta_function_name, function_name, params, \\\n                        res_name=\"result\", data_name=\"tmp_obj\"):\n    '''\n    Runs a binary `meta` method over the data in the bin_data_name argument\n    (on the R side). The meta-method called is specified by the meta_function_name\n    argument. \n    '''\n    params_df = ro.r['data.frame'](**params)\n    r_str = \"%s<-%s('%s', %s, %s)\" % \\\n        (res_name, meta_function_name, function_name, data_name, params_df.r_repr())\n\n    print \"\\n\\n(run_meta_method): executing:\\n %s\\n\" % r_str\n\n    execute_r_string(r_str)\n    result = execute_r_string(\"%s\" % res_name)\n    \n    # parse out text field(s). 
note that \"plot names\" is 'reserved', i.e., it's\n # a special field which is assumed to contain the plot variable names\n # in R (for graphics manipulation).\n return parse_out_results(result) \n\n\ndef _get_c_str_for_col(m, i):\n return \", \".join(_get_col(m, i))\n\ndef _to_strs(v):\n return [str(x) for x in v]\n\ndef _get_col(m, i):\n col_vals = []\n for x in m:\n col_vals.append(x[i])\n return col_vals\n\n\n@RfunctionCaller\ndef diagnostic_effects_for_study(tp, fn, fp, tn, metrics=[\"Spec\", \"Sens\"],\n conf_level=95.0):\n # first create a diagnostic data object\n r_str = \"diag.tmp <- new('DiagnosticData', TP=c(%s), FN=c(%s), TN=c(%s), FP=c(%s))\" % \\\n (tp, fn, tn, fp)\n \n print \"\\n\\n(diagnostic_effects_for_study): executing:\\n %s\\n\" % r_str\n execute_r_string(r_str)\n \n # this will map metrics to est., lower, upper\n effects_dict = {}\n for metric in metrics:\n ###\n # Curiously (annoyingly), updating the params dictionary, then recasting it using the\n # ro.r['data.frame'](** params) call will not overwrite the existing\n # structure on the R side -- i.e., you will keep getting the same metric\n # here. Hence the somewhat ugly strategy of constructing the whole\n # named list on the R side anew on each iteration\n #####\n\n r_res = execute_r_string(\"get.res.for.one.diag.study(diag.tmp, \\\n list('to'='only0', 'measure'='{0}', 'conf.level'={1:.6f}, 'adjust'=.5))\".format(metric, conf_level))\n est, lower, upper = r_res[0][0], r_res[1][0], r_res[2][0]\n calc_estimates = (est, lower, upper)\n disp_estimates = [diagnostic_convert_scale(x, metric) for x in calc_estimates]\n effects_dict[metric] = {\"calc_scale\":calc_estimates, \"display_scale\":disp_estimates}\n \n\n return effects_dict\n \n\n@RfunctionCaller\ndef continuous_effect_for_study(n1, m1, sd1, se1=None, n2=None, m2=None,\n sd2=None, se2=None, metric=\"MD\", two_arm=True,\n conf_level=95.0):\n \n point_est, se = None, None\n if two_arm:\n if not None in [se1, se2] and metric==\"MD\":\n # in this case, we have means & standard errors (but no sample size/ sds)\n # thus we compute the point estimate and se directly\n point_est = m1-m2\n se = math.sqrt(sum([x**2 for x in [se1, se2]]))\n else:\n r_str = \"escalc('%s', n1i=c(%s), n2i=c(%s), m1i=c(%s), m2i=c(%s), sd1i=c(%s), sd2i=c(%s))\" %\\\n (metric, n1, n2, m1, m2, sd1, sd2)\n \n \n effect = execute_r_string(r_str)\n # the first 0 indexes into the study; the second, into the point estimate\n # (the escalc method is general and thus expects an array of studies)\n point_est = effect[0][0]\n se = math.sqrt(effect[1][0])\n else:\n # only one-arm\n point_est = m1\n # eesh; this was only over n until \n # 3/28/13.\n se = sd1/math.sqrt(n1)\n \n alpha = 1.0-(conf_level/100.0)\n r_str = \"abs(qnorm(%s))\" % str(alpha/2.0)\n mult = execute_r_string(r_str)[0]\n #print(\"Alpha:\",alpha)\n #print(\"mult:\" ,mult)\n lower, upper = (point_est-mult*se, point_est+mult*se)\n est_and_ci = (point_est, lower, upper)\n transformed_est_and_ci = continuous_convert_scale(est_and_ci, metric)\n return {\"calc_scale\":est_and_ci, \"display_scale\":transformed_est_and_ci}\n\n\n@RfunctionCaller\ndef effect_for_study(e1, n1, e2=None, n2=None, two_arm=True, \n metric=\"OR\", conf_level=95):\n '''\n Computes a point estimate, lower & upper bound for\n the parametric 2x2 *binary* table data.\n\n @TODO add support for non-normal (e.g., T) distributions\n\n @params\n ===\n e1 -- events in group 1\n n1 -- size of group 1\n e2 -- events in group 2\n n2 -- size of group 2\n --\n '''\n print metric\n 
r_str = None\n if two_arm:\n # notice that we're using WV's escalc routine here\n r_str = \"escalc(measure='%s', ai=c(%s), n1i=c(%s), ci=c(%s), n2i=c(%s))\" %\\\n (metric, e1, n1, e2, n2)\n else:\n r_str = \"escalc(measure='%s', xi=c(%s), ni=c(%s))\" % (metric, e1, n1) \n \n #print \"calling out to R: %s\" % r_str\n effect = execute_r_string(r_str)\n\n #print \"result: %s\" % effect\n point_est = effect[0][0]\n se = math.sqrt(effect[1][0])\n \n #print \"point_est: \", point_est\n #print \"var:\", effect[1][0]\n\n # scalar for computing confidence interval\n alpha = 1.0-(conf_level/100.0)\n r_str = \"abs(qnorm(%s))\" % str(alpha/2.0)\n mult = execute_r_string(r_str)[0]\n\n # note that the point estimate, lower & upper are all computed\n # and returned on the calculation scale (e.g., log in the case of\n # ratios)\n lower, upper = (point_est-mult*se, point_est+mult*se)\n \n print \"%s, %s, %s\" % (lower, point_est, upper)\n\n # we return both the transformed and untransformed scales here\n est_and_ci = (point_est, lower, upper)\n transformed_est_and_ci = binary_convert_scale(est_and_ci, metric, n1=n1)\n return {\"calc_scale\":est_and_ci, \"display_scale\":transformed_est_and_ci}\n\ndef binary_convert_scale(x, metric_name, convert_to=\"display.scale\", n1=None):\n # convert_to is either 'display.scale' or 'calc.scale'\n return generic_convert_scale(x, metric_name, \"binary\", convert_to, n1)\n \ndef continuous_convert_scale(x, metric_name, convert_to=\"display.scale\"):\n return generic_convert_scale(x, metric_name, \"continuous\", convert_to)\n \ndef diagnostic_convert_scale(x, metric_name, convert_to=\"display.scale\"):\n return generic_convert_scale(x, metric_name, \"diagnostic\", convert_to)\n\n\n@RfunctionCaller\ndef generic_convert_scale(x, metric_name, data_type, convert_to=\"display.scale\", n1=None):\n r_str = \"trans.f <- %s.transform.f('%s')\" % (data_type, metric_name)\n execute_r_string(r_str)\n\n if x is None or x == \"\":\n return None\n islist = isinstance(x, list) or isinstance(x, tuple) # being loose with what qualifies as a 'list' here.\n if islist:\n execute_r_string(\"x <- c%s\" % str(x))\n if metric_name == \"PFT\":\n execute_r_string(\"ni<-c%s\" % str((n1,)*len(x)))\n else:\n execute_r_string(\"x <- %s\" % str(x))\n if metric_name == \"PFT\":\n execute_r_string(\"ni<-%s\" % str(n1))\n \n if metric_name == \"PFT\":\n transformed = execute_r_string(\"trans.f$%s(x=x, ni=ni)\" % convert_to)\n else:\n transformed = execute_r_string(\"trans.f$%s(x)\" % convert_to)\n transformed_ls = [x_i for x_i in transformed]\n if not islist:\n # scalar\n return transformed_ls[0]\n return transformed_ls\n\n\n@RfunctionCaller\ndef turn_off_R_graphics():\n execute_r_string(\"graphics.off()\")\n\n" }, { "alpha_fraction": 0.7277251482009888, "alphanum_fraction": 0.7433649301528931, "avg_line_length": 52.40506362915039, "blob_id": "a262d42f907a3574c1fa64cfc5fd1a41c528ce9a", "content_id": "73f075129e9588ec0bcb83c90aeafe3ec663760b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4220, "license_type": "no_license", "max_line_length": 156, "num_lines": 79, "path": "/src/forms/ui_choose_back_calc_result_form.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'choose_back_calc_result_form.ui'\n#\n# Created: Tue Aug 13 11:04:43 2013\n# by: PyQt4 UI code generator 4.10.2\n#\n# WARNING! 
All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_ChooseBackCalcResultForm(object):\n def setupUi(self, ChooseBackCalcResultForm):\n ChooseBackCalcResultForm.setObjectName(_fromUtf8(\"ChooseBackCalcResultForm\"))\n ChooseBackCalcResultForm.resize(482, 221)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.MinimumExpanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(ChooseBackCalcResultForm.sizePolicy().hasHeightForWidth())\n ChooseBackCalcResultForm.setSizePolicy(sizePolicy)\n ChooseBackCalcResultForm.setMinimumSize(QtCore.QSize(480, 0))\n self.verticalLayout = QtGui.QVBoxLayout(ChooseBackCalcResultForm)\n self.verticalLayout.setObjectName(_fromUtf8(\"verticalLayout\"))\n self.info_label = QtGui.QLabel(ChooseBackCalcResultForm)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.info_label.sizePolicy().hasHeightForWidth())\n self.info_label.setSizePolicy(sizePolicy)\n self.info_label.setWordWrap(True)\n self.info_label.setObjectName(_fromUtf8(\"info_label\"))\n self.verticalLayout.addWidget(self.info_label)\n self.line_2 = QtGui.QFrame(ChooseBackCalcResultForm)\n self.line_2.setFrameShape(QtGui.QFrame.HLine)\n self.line_2.setFrameShadow(QtGui.QFrame.Sunken)\n self.line_2.setObjectName(_fromUtf8(\"line_2\"))\n self.verticalLayout.addWidget(self.line_2)\n self.horizontalLayout = QtGui.QHBoxLayout()\n self.horizontalLayout.setObjectName(_fromUtf8(\"horizontalLayout\"))\n self.choice1_btn = QtGui.QRadioButton(ChooseBackCalcResultForm)\n self.choice1_btn.setChecked(True)\n self.choice1_btn.setObjectName(_fromUtf8(\"choice1_btn\"))\n self.horizontalLayout.addWidget(self.choice1_btn)\n self.choice2_btn = QtGui.QRadioButton(ChooseBackCalcResultForm)\n self.choice2_btn.setObjectName(_fromUtf8(\"choice2_btn\"))\n self.horizontalLayout.addWidget(self.choice2_btn)\n self.verticalLayout.addLayout(self.horizontalLayout)\n self.buttonBox = QtGui.QDialogButtonBox(ChooseBackCalcResultForm)\n self.buttonBox.setOrientation(QtCore.Qt.Horizontal)\n self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)\n self.buttonBox.setObjectName(_fromUtf8(\"buttonBox\"))\n self.verticalLayout.addWidget(self.buttonBox)\n\n self.retranslateUi(ChooseBackCalcResultForm)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\"accepted()\")), ChooseBackCalcResultForm.accept)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\"rejected()\")), ChooseBackCalcResultForm.reject)\n QtCore.QMetaObject.connectSlotsByName(ChooseBackCalcResultForm)\n\n def retranslateUi(self, ChooseBackCalcResultForm):\n ChooseBackCalcResultForm.setWindowTitle(_translate(\"ChooseBackCalcResultForm\", \"Choose Back Calculation Result\", None))\n self.info_label.setText(_translate(\"ChooseBackCalcResultForm\", \"The back-calculation has resulted in two possible sets of choices for the counts.\\n\"\n\"\\n\"\n\"Please choose 
one from below:\", None))\n self.choice1_btn.setText(_translate(\"ChooseBackCalcResultForm\", \"Choice 1\", None))\n self.choice2_btn.setText(_translate(\"ChooseBackCalcResultForm\", \"Choice 2\", None))\n\n" }, { "alpha_fraction": 0.3410138189792633, "alphanum_fraction": 0.35599079728126526, "avg_line_length": 22.11111068725586, "blob_id": "5dd1bc9e64dc74fd319506222164592e78218ad0", "content_id": "3c57f034dcc2338d3e81d2a3ceb768f8f6973d5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 868, "license_type": "no_license", "max_line_length": 40, "num_lines": 36, "path": "/src/R/HSROC/R/Which_data.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "Which_data <-\r\nfunction (RANDOM, data, init, GS) \r\n{\r\n if (GS == FALSE) {\r\n if (RANDOM == TRUE) {\r\n sv = numeric()\r\n sv2 = numeric()\r\n svrs = numeric()\r\n result = list(sv, sv2, svrs)\r\n return(result)\r\n }\r\n else {\r\n sv = init[[1]]\r\n sv2 = init[[2]]\r\n svrs = init[[3]]\r\n result = list(sv, sv2, svrs)\r\n return(result)\r\n }\r\n }\r\n else {\r\n if (RANDOM == TRUE) {\r\n sv = numeric()\r\n sv2 = numeric()\r\n svrs = numeric()\r\n result = list(sv, sv2, svrs)\r\n return(result)\r\n }\r\n else {\r\n sv = init[[1]]\r\n sv2 = init[[2]]\r\n svrs = NULL\r\n result = list(sv, sv2, svrs)\r\n return(result)\r\n }\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.6078112125396729, "alphanum_fraction": 0.6102522611618042, "avg_line_length": 35.969696044921875, "blob_id": "71fe5a8bcc0d18bead21faf9a2a7aa1c4d0c34a8", "content_id": "24f4c5f1e4bffac6181b3bc0d0d18a3161ab30db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1229, "license_type": "no_license", "max_line_length": 90, "num_lines": 33, "path": "/src/meta_subgroup_form.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "from PyQt4.Qt import *\n\nimport forms.ui_cov_subgroup_dlg\nfrom meta_globals import FACTOR\n\nclass MetaSubgroupForm(QDialog, forms.ui_cov_subgroup_dlg.Ui_cov_subgroup_dialog):\n \n def __init__(self, model, parent=None):\n super(MetaSubgroupForm, self).__init__(parent)\n self.model = model\n self.setupUi(self)\n self._populate_combo_box()\n QObject.connect(self.buttonBox, SIGNAL(\"rejected()\"), self.cancel)\n QObject.connect(self.buttonBox, SIGNAL(\"accepted()\"), self.get_selected_cov)\n \n def cancel(self):\n print \"(cancel)\"\n self.reject()\n \n def get_selected_cov(self):\n selected_cov = unicode(self.cov_subgroup_cbo_box.currentText().toUtf8(), \"utf-8\") \n self.parent().meta_subgroup(selected_cov)\n self.accept()\n \n def _populate_combo_box(self):\n studies = self.model.get_studies(only_if_included=True)\n \n for cov in self.model.dataset.covariates:\n if cov.get_data_type() != FACTOR:\n continue\n cov_vals = [study.covariate_dict[cov.name] for study in studies]\n if not None in cov_vals:\n self.cov_subgroup_cbo_box.addItem(cov.name)\n\n " }, { "alpha_fraction": 0.6188303828239441, "alphanum_fraction": 0.631688117980957, "avg_line_length": 68.9117660522461, "blob_id": "3dd2d1909ce184144b31db313feb278b8b08d5d1", "content_id": "4b399ab80cc8634c4499adc8a2001c8ac7e85b1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 2411, "license_type": "no_license", "max_line_length": 100, "num_lines": 34, "path": "/src/R/HSROC/R/gibbs_sampler_Cpp.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "gibbs_sampler_Cpp 
<-\r\nfunction (iter.num, Gold_Std, Gold_se, Gold_sp, Total, t1, t2, \r\n init.PI, init.S1, init.S2, init.C1, init.C2, n, N, alpha.PI, \r\n beta.PI, n.refstd, n_REFSTD, Sens2.alpha, Sens2.beta, Spec2.alpha, \r\n Spec2.beta, init.alpha, init.theta, init.beta, low.rj, up.rj, \r\n init.THETA, init.sigma.theta, init.sigma.alpha, init.LAMBDA, \r\n prior.LAMBDA.lower, prior.LAMBDA.upper, beta.a, beta.b, prior.THETA.lower, \r\n prior.THETA.upper, low.disp.alpha, up.disp.alpha, low.disp.theta, \r\n up.disp.theta, prior_sig_alpha, prior_sig_theta, refresh) \r\n{\r\n test = .C(\"HSROC\", iter = as.integer(iter.num), gold_std = as.integer(Gold_Std), \r\n gold_se = as.integer(Gold_se), gold_sp = as.integer(Gold_sp), \r\n total = as.integer(Total), t1 = as.integer(t1), t2 = as.integer(t2), \r\n vec_pi = as.double(init.PI), vec_S1 = as.double(init.S1), \r\n vec_S2 = as.double(init.S2), vec_C1 = as.double(init.C1), \r\n vec_C2 = as.double(init.C2), study_samplesize = as.integer(n), \r\n n_studies = as.integer(N), alpha_pi = as.double(alpha.PI), \r\n beta_pi = as.double(beta.PI), refstd = as.integer(n.refstd), \r\n numb_refstd = as.integer(n_REFSTD), sens2_alpha = as.double(Sens2.alpha), \r\n sens2_beta = as.double(Sens2.beta), spec2_alpha = as.double(Spec2.alpha), \r\n spec2_beta = as.double(Spec2.beta), vec_alpha = as.double(init.alpha), \r\n vec_theta = as.double(init.theta), vec_beta = as.double(init.beta), \r\n low_rij = as.double(low.rj), up_rij = as.double(up.rj), \r\n vec_CTHETA = as.double(init.THETA), vec_sigma_theta = as.double(init.sigma.theta), \r\n vec_sigma_alpha = as.double(init.sigma.alpha), vec_LAMBDA = as.double(init.LAMBDA), \r\n LAMBDA_lower = as.double(prior.LAMBDA.lower), LAMBDA_upper = as.double(prior.LAMBDA.upper), \r\n beta_a = as.double(beta.a), beta_b = as.double(beta.b), \r\n CTHETA_lower = as.double(prior.THETA.lower), CTHETA_upper = as.double(prior.THETA.upper), \r\n low_sd_alpha = as.double(low.disp.alpha), up_sd_alpha = as.double(up.disp.alpha), \r\n low_sd_theta = as.double(low.disp.theta), up_sd_theta = as.double(up.disp.theta), \r\n prior_sd_alpha = as.integer(prior_sig_alpha), prior_sd_theta = as.integer(prior_sig_theta), \r\n refresh = as.integer(refresh), breaking_point = as.integer(0))\r\n return(test$breaking_point)\r\n}\r\n" }, { "alpha_fraction": 0.3015218675136566, "alphanum_fraction": 0.3129359483718872, "avg_line_length": 41.60368728637695, "blob_id": "a9ffeb2c68bbefe9cc212b3dd2d3632c325b6b38", "content_id": "cc2aa1b610c6827fb05cbdcc86fe0e830181ae57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 9462, "license_type": "no_license", "max_line_length": 86, "num_lines": 217, "path": "/src/R/HSROC/R/Restore.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "Restore <-\r\nfunction (break_point, gold_std) \r\n{\r\n if (break_point > 0) {\r\n if (break_point == 1) {\r\n alpha = read.table(\"alpha.txt\")\r\n count = length(alpha[, 1])\r\n k = count\r\n x = 0\r\n while (is.na(x) == FALSE) {\r\n x = sum(alpha[k, ])\r\n k = k - 1\r\n }\r\n }\r\n else {\r\n if (break_point == 2) {\r\n theta = read.table(\"theta.txt\")\r\n count = length(theta[, 1])\r\n k = count\r\n x = 0\r\n while (is.na(x) == FALSE) {\r\n x = sum(theta[k, ])\r\n k = k - 1\r\n }\r\n }\r\n else {\r\n if (break_point == 3) {\r\n s1 = read.table(\"Sens1.txt\")\r\n count = length(s1[, 1])\r\n k = count\r\n x = 0\r\n while (is.na(x) == FALSE) {\r\n x = sum(s1[k, ])\r\n k = k - 1\r\n }\r\n }\r\n else {\r\n if (break_point == 
4) {\r\n c1 = read.table(\"Spec1.txt\")\r\n count = length(c1[, 1])\r\n k = count\r\n x = 0\r\n while (is.na(x) == FALSE) {\r\n x = sum(c1[k, ])\r\n k = k - 1\r\n }\r\n }\r\n else {\r\n if (break_point == 5) {\r\n pi = read.table(\"PI.txt\")\r\n count = length(pi[, 1])\r\n k = count\r\n x = 0\r\n while (is.na(x) == FALSE) {\r\n x = sum(pi[k, ])\r\n k = k - 1\r\n }\r\n }\r\n else {\r\n if (break_point == 6) {\r\n lambda = read.table(\"LAMBDA.txt\")\r\n count = length(lambda[, 1])\r\n k = count\r\n x = 0\r\n while (is.na(x) == FALSE) {\r\n x = sum(lambda[k, ])\r\n k = k - 1\r\n }\r\n }\r\n else {\r\n if (break_point == 7) {\r\n sig.alph = read.table(\"sigma.alpha.txt\")\r\n count = length(sig.alph[, 1])\r\n k = count\r\n x = 0\r\n while (is.na(x) == FALSE) {\r\n x = sum(sig.alph[k, ])\r\n k = k - 1\r\n }\r\n }\r\n else {\r\n if (break_point == 8) {\r\n ctheta = read.table(\"capital_THETA.txt\")\r\n count = length(ctheta[, 1])\r\n k = count\r\n x = 0\r\n while (is.na(x) == FALSE) {\r\n x = sum(ctheta[k, ])\r\n k = k - 1\r\n }\r\n }\r\n else {\r\n if (break_point == 9) {\r\n sig.thet = read.table(\"sigma.theta.txt\")\r\n count = length(sig.thet[, 1])\r\n k = count\r\n x = 0\r\n while (is.na(x) == FALSE) {\r\n x = sum(sig.thet[k, ])\r\n k = k - 1\r\n }\r\n }\r\n else {\r\n if (break_point == 10) {\r\n beta = read.table(\"beta.txt\")\r\n count = length(beta[, 1])\r\n k = count\r\n x = 0\r\n while (is.na(x) == FALSE) {\r\n x = beta[k, 1]\r\n k = k - 1\r\n }\r\n }\r\n else {\r\n if (break_point == 11) {\r\n s2 = read.table(\"Sens2.txt\")\r\n count = length(s2[, 1])\r\n k = count\r\n x = 0\r\n while (is.na(x) == FALSE) {\r\n x = sum(s2[k, ])\r\n k = k - 1\r\n }\r\n }\r\n else {\r\n if (break_point == 11) {\r\n c2 = read.table(\"Spec2.txt\")\r\n count = length(c2[, 1])\r\n k = count\r\n x = 0\r\n while (is.na(x) == FALSE) {\r\n x = sum(c2[k, ])\r\n k = k - 1\r\n }\r\n }\r\n }\r\n }\r\n }\r\n }\r\n }\r\n }\r\n }\r\n }\r\n }\r\n }\r\n }\r\n vec.alpha = read.table(\"alpha.txt\")[k, ]\r\n vec.theta = as.vector(read.table(\"theta.txt\")[k, ])\r\n vec.S1 = read.table(\"Sens1.txt\")[k, ]\r\n vec.C1 = read.table(\"Spec1.txt\")[k, ]\r\n vec.PI = read.table(\"PI.txt\")[k, ]\r\n vec.LAMBDA = read.table(\"LAMBDA.txt\")[k, 1]\r\n vec.sigma.alpha = read.table(\"sigma.alpha.txt\")[k, 1]\r\n vec.THETA = read.table(\"capital_THETA.txt\")[k, 1]\r\n vec.sigma.theta = read.table(\"sigma.theta.txt\")[k, 1]\r\n vec.beta = read.table(\"beta.txt\")[k, 1]\r\n columns = length(vec.alpha[k, ])\r\n write.table(rbind(vec.alpha, vec.theta, vec.S1, vec.C1, \r\n vec.PI), file = \"Restore.txt\", append = TRUE, row.names = FALSE, \r\n col.names = FALSE)\r\n write(c(vec.LAMBDA, vec.sigma.alpha, vec.THETA, vec.sigma.theta, \r\n vec.beta), file = \"Restore2.txt\", append = TRUE)\r\n write(paste(\"______________________________________________________\"), \r\n file = \"Restore_index.txt\", append = TRUE)\r\n write(paste(\"\\t Restore.txt \"), file = \"Restore_index.txt\", \r\n append = TRUE)\r\n write(paste(\"Row 1 : alpha parameters for all M = \", \r\n columns, \" study(ies)\\t \"), file = \"Restore_index.txt\", \r\n append = TRUE)\r\n write(paste(\"Row 2 : theta parameters for all M = \", \r\n columns, \" study(ies)\\t \"), file = \"Restore_index.txt\", \r\n append = TRUE)\r\n write(paste(\"Row 3 : sensitivity of test under evaluation (S1) for all M = \", \r\n columns, \" study(ies)\\t \"), file = \"Restore_index.txt\", \r\n append = TRUE)\r\n write(paste(\"Row 4 : specificity of test under evaluation (C1) for all M = \", \r\n 
columns, \" study(ies)\\t \"), file = \"Restore_index.txt\", \r\n append = TRUE)\r\n write(paste(\"Row 5 : prevalence for all M = \", columns, \r\n \" study(ies)\\t \"), file = \"Restore_index.txt\", \r\n append = TRUE)\r\n write(paste(\"______________________________________________________\"), \r\n file = \"Restore_index.txt\", append = TRUE)\r\n write(paste(\"\\t Restore2.txt \"), file = \"Restore_index.txt\", \r\n append = TRUE)\r\n write(paste(\"Column 1 : LAMBDA parameter\\t \"), file = \"Restore_index.txt\", \r\n append = TRUE)\r\n write(paste(\"Column 2 : sigma alpha parameter\\t \"), \r\n file = \"Restore_index.txt\", append = TRUE)\r\n write(paste(\"Column 3 : THETA parameter\\t \"), file = \"Restore_index.txt\", \r\n append = TRUE)\r\n write(paste(\"Column 4 : sigma theta parameter\\t \"), \r\n file = \"Restore_index.txt\", append = TRUE)\r\n write(paste(\"Column 5 : beta parameter\\t \"), file = \"Restore_index.txt\", \r\n append = TRUE)\r\n write(paste(\"______________________________________________________\"), \r\n file = \"Restore_index.txt\", append = TRUE)\r\n if (gold_std == FALSE) {\r\n vec.S2 = read.table(\"Sens2.txt\")[k, ]\r\n vec.C2 = read.table(\"Spec2.txt\")[k, ]\r\n refstd = length(read.table(\"Sens2.txt\")[k, ])\r\n write(t(cbind(vec.S2, vec.C2)), file = \"Restore3.txt\", \r\n append = TRUE, ncolumns = refstd)\r\n write(paste(\"\\t Restore3.txt \"), file = \"Restore_index.txt\", \r\n append = TRUE)\r\n write(paste(\"Row 1 : sensitivity of reference test (S2) \\t \"), \r\n file = \"Restore_index.txt\", append = TRUE)\r\n write(paste(\"Row 2 : specificity of reference test (C2) \\t \"), \r\n file = \"Restore_index.txt\", append = TRUE)\r\n }\r\n }\r\n else {\r\n if (break_point == 0) {\r\n columns = NULL\r\n }\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.4615384638309479, "alphanum_fraction": 0.4615384638309479, "avg_line_length": 12.857142448425293, "blob_id": "eba169ef89ddd23d23e6f529b8c05029e9d0f30b", "content_id": "533dd68b065954f3145d05e177021d5ef7027479", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 104, "license_type": "no_license", "max_line_length": 27, "num_lines": 7, "path": "/src/R/HSROC/R/pi.beta.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "pi.beta <-\r\nfunction (a, b) \r\n{\r\n n = length(a)\r\n result = n - sum(a) + b\r\n return(result)\r\n}\r\n" }, { "alpha_fraction": 0.5876107215881348, "alphanum_fraction": 0.5890752077102661, "avg_line_length": 46.25951385498047, "blob_id": "4eefb91a88438f6fbd3c2e51f95018dd33ae3fc2", "content_id": "13160c764dafad6a1b9bdd49f67616c372c804d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13657, "license_type": "no_license", "max_line_length": 117, "num_lines": 289, "path": "/src/edit_dialog.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "##################################\n# \n# Byron C. Wallace \n# Tufts Medical Center \n# OpenMeta[analyst] \n# \n# This form is for 'batch' editing a dataset. Note that any \n# edits apply to *all* MetaAnalyticUnit objects known. So\n# e.g., if a group name is changed, it will be changed \n# *everywhere*.\n#\n# Note also that this form doesn't itself provide any \n# undo/redo functionality. 
Rather, the strategy is to\n# treat *all* editing done via this form as one\n# undoable action.\n# \n##################################\n\n#import pdb\n\n#from PyQt4.Qt import *\nfrom PyQt4.Qt import QDialog, QObject, SIGNAL\n\nimport forms.ui_edit_dialog\nimport edit_list_models\nimport add_new_dialogs\nimport meta_globals\nimport ma_dataset\n\n\nclass EditDialog(QDialog, forms.ui_edit_dialog.Ui_edit_dialog):\n\n def __init__(self, dataset, parent=None):\n super(EditDialog, self).__init__(parent)\n self.setupUi(self)\n \n ### outcomes\n self.outcomes_model = edit_list_models.OutcomesModel(dataset = dataset)\n self.outcome_list.setModel(self.outcomes_model)\n try:\n index_of_outcome_to_select = self.outcomes_model.outcome_list.index(parent.model.current_outcome)\n outcome_index = self.outcomes_model.createIndex(index_of_outcome_to_select, 0)\n self.outcome_list.setCurrentIndex(outcome_index)\n self.selected_outcome = parent.model.current_outcome\n self.remove_outcome_btn.setEnabled(True)\n except:\n # no outcomes.\n self.selected_outcome = None\n \n ### follow-ups\n # notice that we pass the follow ups model the current outcome, because it will display only\n # those follow-ups included for this outcome\n self.follow_ups_model = edit_list_models.FollowUpsModel(dataset = dataset, outcome = self.selected_outcome)\n self.follow_up_list.setModel(self.follow_ups_model)\n if self.selected_outcome is not None:\n self.selected_follow_up = parent.model.get_current_follow_up_name()\n index_of_follow_up_to_select = self.follow_ups_model.follow_up_list.index(self.selected_follow_up)\n follow_up_index = self.follow_ups_model.createIndex(index_of_follow_up_to_select, 0)\n self.follow_up_list.setCurrentIndex(follow_up_index)\n else:\n self.selected_follow_up = None\n \n \n ### groups\n self.groups_model = edit_list_models.TXGroupsModel(dataset = dataset,\\\n outcome = self.selected_outcome, follow_up = self.selected_follow_up)\n self.group_list.setModel(self.groups_model)\n \n \n ### studies\n # this is sort of hacky; we lop off the last study, which is \n # always 'blank'. 
this is a recurring, rather annoying issue.\n #self.blank_study = dataset.studies[-1]\n #dataset.studies = dataset.studies[:-1]\n self.studies_model = edit_list_models.StudiesModel(dataset = dataset)\n self.study_list.setModel(self.studies_model)\n \n ### covariates\n self.covariates_model = edit_list_models.CovariatesModel(dataset = dataset)\n self.covariate_list.setModel(self.covariates_model)\n \n self._setup_connections()\n self.dataset = dataset\n \n \n def _setup_connections(self):\n ###\n # groups\n QObject.connect(self.add_group_btn, SIGNAL(\"pressed()\"),\n self.add_group)\n QObject.connect(self.remove_group_btn, SIGNAL(\"pressed()\"),\n self.remove_group)\n QObject.connect(self.group_list, SIGNAL(\"clicked(QModelIndex)\"),\n self.group_selected)\n \n ###\n # outcomes \n QObject.connect(self.add_outcome_btn, SIGNAL(\"pressed()\"),\n self.add_outcome)\n QObject.connect(self.remove_outcome_btn, SIGNAL(\"pressed()\"),\n self.remove_outcome) \n QObject.connect(self.outcome_list, SIGNAL(\"clicked(QModelIndex)\"),\n self.outcome_selected)\n\n\n ###\n # follow-ups\n QObject.connect(self.add_follow_up_btn, SIGNAL(\"pressed()\"),\n self.add_follow_up)\n QObject.connect(self.remove_follow_up_btn, SIGNAL(\"pressed()\"),\n self.remove_follow_up) \n QObject.connect(self.follow_up_list, SIGNAL(\"clicked(QModelIndex)\"),\n self.follow_up_selected)\n \n ###\n # studies\n QObject.connect(self.add_study_btn, SIGNAL(\"pressed()\"),\n self.add_study)\n QObject.connect(self.remove_study_btn, SIGNAL(\"pressed()\"),\n self.remove_study) \n QObject.connect(self.study_list, SIGNAL(\"clicked(QModelIndex)\"),\n self.study_selected)\n \n ###\n # covariates\n QObject.connect(self.add_covariate_btn, SIGNAL(\"pressed()\"),\n self.add_covariate)\n QObject.connect(self.remove_covariate_btn, SIGNAL(\"pressed()\"),\n self.remove_covariate) \n QObject.connect(self.covariate_list, SIGNAL(\"clicked(QModelIndex)\"),\n self.covariate_selected)\n \n \n def add_group(self):\n form = add_new_dialogs.AddNewGroupForm(self)\n form.group_name_le.setFocus() \n if form.exec_():\n new_group_name = unicode(form.group_name_le.text().toUtf8(), \"utf-8\")\n self.group_list.model().dataset.add_group(new_group_name, self.selected_outcome)\n self.group_list.model().refresh_group_list(self.selected_outcome, self.selected_follow_up)\n \n def remove_group(self):\n index = self.group_list.currentIndex()\n selected_group = self.group_list.model().group_list[index.row()]\n self.group_list.model().dataset.delete_group(selected_group)\n self.group_list.model().refresh_group_list(self.selected_outcome, self.selected_follow_up)\n self.group_list.model().reset()\n \n def group_selected(self, index):\n self.disable_remove_buttons()\n self.remove_group_btn.setEnabled(True)\n \n def add_outcome(self):\n form = add_new_dialogs.AddNewOutcomeForm(self, is_diag=self.dataset.is_diag)\n form.outcome_name_le.setFocus()\n if form.exec_():\n # then the user clicked ok and has added a new outcome.\n # here we want to add the outcome to the dataset, and then\n # display it\n new_outcome_name = unicode(form.outcome_name_le.text().toUtf8(), \"utf-8\")\n # the outcome type is one of the enumerated types; we don't worry about\n # unicode encoding\n data_type = str(form.datatype_cbo_box.currentText())\n data_type = meta_globals.STR_TO_TYPE_DICT[data_type.lower()]\n self.outcome_list.model().dataset.add_outcome(ma_dataset.Outcome(new_outcome_name, data_type))\n\n self.outcome_list.model().refresh_outcome_list()\n self.outcome_list.model().current_outcome = 
new_outcome_name\n \n def get_selected_outcome(self):\n index = self.outcome_list.currentIndex()\n if index.row() < 0 or index.row() > len(self.outcome_list.model().outcome_list):\n return None\n return self.outcome_list.model().outcome_list[index.row()]\n \n def get_selected_covariate(self):\n index = self.covariate_list.currentIndex()\n if index.row() < 0:\n return None\n return self.covariate_list.model().covariates_list[index.row()]\n\n def remove_outcome(self):\n self.selected_outcome = self.get_selected_outcome()\n self.outcome_list.model().dataset.remove_outcome(self.selected_outcome)\n self.outcome_list.model().refresh_outcome_list()\n self.outcome_list.model().reset()\n # now update the selected outcome\n self.selected_outcome = self.get_selected_outcome()\n # update the follow-ups list as appropriate\n if self.selected_outcome is not None:\n self.follow_up_list.model().current_outcome = self.selected_outcome\n print \"\\ncurrent outcome updated. is now: %s\" % self.selected_outcome\n self.follow_up_list.model().refresh_follow_up_list()\n self.selected_follow_up = self.get_selected_follow_up()\n ## also update the groups and follow-up lists\n self.group_list.model().refresh_group_list(self.selected_outcome, self.selected_follow_up)\n else:\n ## the assumption in this case is that all outcomes have been deleted\n # so we clear the follow up and group lists.\n self.follow_up_list.model().follow_up_list = []\n self.follow_up_list.model().reset()\n self.group_list.model().group_list = []\n self.group_list.model().reset()\n\n def outcome_selected(self, index):\n self.selected_outcome = self.get_selected_outcome()\n self.follow_up_list.model().current_outcome = self.selected_outcome\n self.follow_up_list.model().refresh_follow_up_list()\n self.group_list.model().refresh_group_list(self.selected_outcome, self.selected_follow_up)\n ## update\n self.disable_remove_buttons()\n self.remove_outcome_btn.setEnabled(True)\n \n def add_follow_up(self):\n form = add_new_dialogs.AddNewFollowUpForm(self)\n form.follow_up_name_le.setFocus()\n if form.exec_():\n follow_up_lbl = unicode(form.follow_up_name_le.text().toUtf8(), \"utf-8\")\n self.follow_up_list.model().dataset.add_follow_up(follow_up_lbl)\n self.follow_up_list.model().current_outcome =self.selected_outcome\n self.follow_up_list.model().refresh_follow_up_list()\n \n def get_selected_follow_up(self):\n index = self.follow_up_list.currentIndex()\n print \"index is: %s\" % index.row()\n print \"here is the current follow-up list: %s\" % self.follow_up_list.model().follow_up_list\n return self.follow_up_list.model().follow_up_list[index.row()]\n\n def get_selected_study(self):\n index = self.study_list.currentIndex().row()\n return self.study_list.model().dataset.studies[index]\n \n def study_selected(self):\n self.remove_study_btn.setEnabled(True) \n \n def covariate_selected(self):\n self.remove_covariate_btn.setEnabled(True)\n \n def add_covariate(self):\n form = add_new_dialogs.AddNewCovariateForm(self)\n form.covariate_name_le.setFocus()\n if form.exec_():\n new_covariate_name = unicode(form.covariate_name_le.text().toUtf8(), \"utf-8\")\n new_covariate_type = str(form.datatype_cbo_box.currentText())\n cov_obj = ma_dataset.Covariate(new_covariate_name, new_covariate_type)\n self.covariate_list.model().dataset.add_covariate(cov_obj)\n self.covariate_list.model().update_covariates_list()\n \n def remove_covariate(self):\n cov_obj = self.get_selected_covariate()\n self.covariate_list.model().dataset.remove_covariate(cov_obj)\n 
self.covariate_list.model().update_covariates_list()\n \n def remove_follow_up(self):\n self.selected_follow_up = self.get_selected_follow_up()\n self.follow_up_list.model().dataset.remove_follow_up(self.selected_follow_up)\n self.follow_up_list.model().current_outcome =self.selected_outcome\n self.follow_up_list.model().refresh_follow_up_list()\n \n def follow_up_selected(self, index):\n self.disable_remove_buttons()\n # we want to disallow the user from removing *all* \n # follow-ups for a given outcome, since this would be meaningless.\n # thus we check if there is only follow-up; if so, disable \n # (or rather, don't enable) the remove button\n if len(self.follow_up_list.model().follow_up_list) > 1:\n self.remove_follow_up_btn.setEnabled(True)\n self.selected_follow_up = self.get_selected_follow_up()\n self.group_list.model().refresh_group_list(self.selected_outcome, self.selected_follow_up)\n \n def disable_remove_buttons(self):\n self.remove_group_btn.setEnabled(False)\n self.remove_follow_up_btn.setEnabled(False)\n self.remove_outcome_btn.setEnabled(False)\n \n def add_study(self):\n form = add_new_dialogs.AddNewStudyForm(self)\n form.study_lbl.setFocus()\n if form.exec_():\n study_name = unicode(form.study_lbl.text().toUtf8(), \"utf-8\")\n study_id = self.study_list.model().dataset.max_study_id()+1\n new_study = ma_dataset.Study(study_id, name = study_name)\n self.study_list.model().dataset.add_study(new_study)\n self.study_list.model().update_study_list()\n \n def remove_study(self):\n study = self.get_selected_study()\n self.study_list.model().dataset.studies.remove(study)\n self.study_list.model().update_study_list()" }, { "alpha_fraction": 0.5905292630195618, "alphanum_fraction": 0.5929305553436279, "avg_line_length": 36.18571472167969, "blob_id": "b9d2b887df9764249c11577bcdfac88b3d8b90fc", "content_id": "570f0d5661ad46c40d3da779c1518e2b937a4699", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10411, "license_type": "no_license", "max_line_length": 104, "num_lines": 280, "path": "/src/edit_list_models.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "#################################################################\n#\n# Byron C. Wallace\n# Tufts Medical Center\n# OpenMeta[analyst]\n# ---\n# Proxy interfaces for mediating between the underlying representation (in ma_dataset.py)\n# and the editing UI.\n################################################################\n\n#import pdb\n\n# core libraries\n#from PyQt4.QtCore import *\nfrom PyQt4.QtCore import QAbstractTableModel, QModelIndex, QString, Qt, QVariant\n\nclass TXGroupsModel(QAbstractTableModel):\n '''\n This module mediates between the classes comprising a dataset\n (i.e., study & ma_unit objects) and the view. 
In particular, we\n subclass the QAbstractTableModel and provide the fields of interest\n to the view.\n '''\n def __init__(self, filename=QString(), dataset=None, outcome=None, follow_up=None):\n super(TXGroupsModel, self).__init__()\n self.dataset = dataset\n self.current_outcome = outcome\n self.current_follow_up = follow_up\n self.refresh_group_list(outcome, follow_up)\n \n def refresh_group_list(self, outcome, follow_up):\n self.group_list = self.dataset.get_group_names_for_outcome_fu(outcome, follow_up)\n print \"\\ngroup names are: %s\" % self.group_list\n self.reset()\n \n def data(self, index, role=Qt.DisplayRole):\n if not index.isValid() or not (0 <= index.row() < len(self.group_list)):\n return QVariant()\n group_name = self.group_list[index.row()]\n if role == Qt.DisplayRole:\n return QVariant(group_name)\n elif role == Qt.TextAlignmentRole:\n return QVariant(int(Qt.AlignLeft|Qt.AlignVCenter))\n return QVariant()\n \n def rowCount(self, index=QModelIndex()):\n return len(self.group_list)\n \n def columnCount(self, index=QModelIndex()):\n return 1\n \n def setData(self, index, value, role=Qt.EditRole):\n old_name = self.group_list[index.row()]\n new_name = unicode(value.toString().toUtf8(), \"utf-8\")\n\n ###\n # we don't allow empty strings for group names; just pass\n # if this happens (typically this will be an accident on the user's part)\n if new_name == \"\":\n return False\n \n self.dataset.change_group_name(old_name, new_name)#, \\\n #outcome=self.current_outcome, follow_up=self.current_follow_up)\n self.refresh_group_list(self.current_outcome, self.current_follow_up)\n return True\n \n def flags(self, index):\n if not index.isValid():\n return Qt.ItemIsEnabled\n return Qt.ItemFlags(QAbstractTableModel.flags(self, index)|\n Qt.ItemIsEditable)\n\n \nclass OutcomesModel(QAbstractTableModel):\n '''\n A simple table model for editing/deleting/adding outcomes.\n Subclasses the QAbstractTableModel and provide the fields of interest\n to the view.\n '''\n def __init__(self, filename=QString(), dataset=None):\n super(OutcomesModel, self).__init__()\n self.dataset = dataset\n self.current_outcome = None\n self.outcome_list = self.dataset.get_outcome_names()\n\n \n def refresh_outcome_list(self):\n self.outcome_list = self.dataset.get_outcome_names()\n self.reset()\n \n def data(self, index, role=Qt.DisplayRole):\n self.outcome_list = self.dataset.get_outcome_names()\n if not index.isValid() or not (0 <= index.row()):\n return QVariant()\n outcome_name = \"\"\n try:\n outcome_name = self.outcome_list[index.row()]\n except:\n pass\n if role == Qt.DisplayRole:\n return QVariant(outcome_name)\n elif role == Qt.TextAlignmentRole:\n return QVariant(int(Qt.AlignLeft|Qt.AlignVCenter))\n return QVariant()\n \n def rowCount(self, index=QModelIndex()):\n return len(self.outcome_list)\n \n def columnCount(self, index=QModelIndex()):\n return 1\n \n def setData(self, index, value, role=Qt.EditRole):\n old_outcome_name = self.outcome_list[index.row()]\n new_outcome_name = unicode(value.toString().toUtf8(), \"utf-8\")\n if new_outcome_name == \"\":\n return False\n \n self.dataset.change_outcome_name(old_outcome_name, new_outcome_name)\n # issue #130: if we change an outcome name, set the current outcome\n # to said outcome\n self.current_outcome = new_outcome_name\n return True\n \n def flags(self, index):\n if not index.isValid():\n return Qt.ItemIsEnabled\n return Qt.ItemFlags(QAbstractTableModel.flags(self, index)|\n Qt.ItemIsEditable)\n \nclass FollowUpsModel(QAbstractTableModel):\n '''\n 
A simple table model for editing/deleting/adding follow-ups.\n Subclasses the QAbstractTableModel and provide the fields of interest\n to the view.\n '''\n def __init__(self, filename=QString(), dataset=None, outcome = None):\n super(FollowUpsModel, self).__init__()\n self.dataset = dataset\n ## we maintain a current outcome string variable because\n # the follow-ups are outcome specific\n self.current_outcome = outcome\n self.follow_up_list = self.dataset.get_follow_up_names_for_outcome(outcome)\n \n def refresh_follow_up_list(self): \n self.follow_up_list = self.dataset.get_follow_up_names_for_outcome(self.current_outcome)\n self.reset()\n \n def data(self, index, role=Qt.DisplayRole):\n if not index.isValid() or not (0 <= index.row()):\n return QVariant()\n follow_up_name = None\n try:\n follow_up_name = self.follow_up_list[index.row()] \n except:\n pass\n \n if role == Qt.DisplayRole:\n return QVariant(follow_up_name)\n elif role == Qt.TextAlignmentRole:\n return QVariant(int(Qt.AlignLeft|Qt.AlignVCenter))\n return QVariant()\n \n def rowCount(self, index=QModelIndex()):\n return len(self.follow_up_list)\n \n def columnCount(self, index=QModelIndex()):\n return 1\n \n def setData(self, index, value, role=Qt.EditRole):\n old_follow_up_name = self.follow_up_list[index.row()]\n new_follow_up_name = unicode(value.toString().toUtf8(), \"utf-8\")\n self.dataset.change_follow_up_name(self.current_outcome, old_follow_up_name, new_follow_up_name)\n self.refresh_follow_up_list()\n return True\n \n \n def flags(self, index):\n if not index.isValid():\n return Qt.ItemIsEnabled\n return Qt.ItemFlags(QAbstractTableModel.flags(self, index)|\n Qt.ItemIsEditable)\n \nclass StudiesModel(QAbstractTableModel):\n '''\n Table model implementation for studies list.\n '''\n def __init__(self, filename=QString(), dataset=None):\n super(StudiesModel, self).__init__()\n self.dataset = dataset\n self.update_study_list()\n \n def update_study_list(self):\n self.studies_list = self.dataset.studies\n self.reset()\n \n def data(self, index, role=Qt.DisplayRole):\n if not index.isValid() or not (0 <= index.row() < len(self.studies_list)):\n return QVariant()\n study_name = self.studies_list[index.row()].name\n if role == Qt.DisplayRole:\n return QVariant(study_name)\n elif role == Qt.TextAlignmentRole:\n return QVariant(int(Qt.AlignLeft|Qt.AlignVCenter))\n return QVariant()\n \n def rowCount(self, index=QModelIndex()):\n return len(self.studies_list)\n \n def columnCount(self, index=QModelIndex()):\n return 1\n \n def setData(self, index, value, role=Qt.EditRole):\n study_object = self.studies_list[index.row()]\n new_name = unicode(value.toString().toUtf8(), \"utf-8\")\n\n ###\n # we don't allow empty strings for group names; just pass\n # if this happens (typically this will be an accident on the user's part)\n if new_name == \"\":\n return False\n \n study_object.name = new_name\n self.update_study_list()\n return True\n \n def flags(self, index):\n if not index.isValid():\n return Qt.ItemIsEnabled\n return Qt.ItemFlags(QAbstractTableModel.flags(self, index)|\n Qt.ItemIsEditable)\n \nclass CovariatesModel(QAbstractTableModel):\n '''\n Table model implementation for covariates.\n '''\n def __init__(self, filename=QString(), dataset=None):\n super(CovariatesModel, self).__init__()\n self.dataset = dataset\n self.update_covariates_list()\n \n def update_covariates_list(self):\n self.covariates_list = self.dataset.covariates\n self.reset()\n \n def data(self, index, role=Qt.DisplayRole):\n if not index.isValid() or not 
(0 <= index.row() < len(self.covariates_list)):\n return QVariant()\n cov_name = self.covariates_list[index.row()].name\n if role == Qt.DisplayRole:\n return QVariant(cov_name)\n elif role == Qt.TextAlignmentRole:\n return QVariant(int(Qt.AlignLeft|Qt.AlignVCenter))\n return QVariant()\n \n def rowCount(self, index=QModelIndex()):\n return len(self.covariates_list)\n \n def columnCount(self, index=QModelIndex()):\n return 1\n \n def setData(self, index, value, role=Qt.EditRole):\n cov_object = self.covariates_list[index.row()]\n new_name = unicode(value.toString().toUtf8(), \"utf-8\")\n\n ###\n # we don't allow empty strings for group names; just pass\n # if this happens (typically this will be an accident on the user's part).\n # nor do we allow covariates to have the same name.\n if new_name == \"\" or new_name in self.dataset.get_cov_names():\n return False\n \n self.dataset.change_covariate_name(cov_object, new_name)\n self.update_covariates_list()\n return True\n \n def flags(self, index):\n if not index.isValid():\n return Qt.ItemIsEnabled\n return Qt.ItemFlags(QAbstractTableModel.flags(self, index)|\n Qt.ItemIsEditable)" }, { "alpha_fraction": 0.47690340876579285, "alphanum_fraction": 0.5013693571090698, "avg_line_length": 37.46479034423828, "blob_id": "5822f8e4ee401c4ba154a861f242a40338cdc51b", "content_id": "2011405480616ec744d0fc0efbced26a61234dba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 5477, "license_type": "no_license", "max_line_length": 152, "num_lines": 142, "path": "/src/R/openmetar/R/bivariate.r", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "\n####################################################################\n# examples\n#\n# meta.results <- bivariate.dx.test(TP, FP, FN, TN)\n# meta.results\n#\n#\n# plot.bivariate(meta.results, TP, FP, FN , TN, plot.points = T , \n# data.range = T, line.color = \"red\",\n# weights = T)\n####################################################################\n\nbivariate.dx.test <- function(TP, FP, FN, TN) {\n library(lme4)\n n.studies <- length(TP)\n persons <- TP + FP + FN + TN\n true.positive <- NA\n false.positive <- NA\n false.negative <- NA\n true.negative <- NA\n\n for (i in 1:n.studies) { \n true.positive <- na.exclude(c(true.positive, rep(1, TP[i]))) \n false.positive <- na.exclude(c( false.positive, rep(0, FP[i]))) \n false.negative <- na.exclude(c(false.negative, rep(0, FN[i])))\n true.negative <- na.exclude(c( true.negative, rep(1, TN[i]))) \n }\n \n study.id <- NA\n for (i in 1:n.studies) {\n study.id <- na.exclude(c( study.id , rep(i, TP[i]))) \n } \n for (i in 1:n.studies) {\n study.id <- na.exclude(c( study.id , rep(i, FN[i]))) \n } \n for (i in 1:n.studies) {\n study.id <- na.exclude(c( study.id , rep(i, FP[i]))) \n } \n for (i in 1:n.studies) {\n study.id <- na.exclude(c( study.id , rep(i, TN[i]))) \n } \n\n group.id.disease <- rep(1, length(true.positive) + length(false.negative))\n group.id.nodisease <- rep(0, length(false.positive) + length(true.negative)) \n group.id1 <- c(group.id.disease , group.id.nodisease) \n group.id2 <- c(group.id.disease -1 , group.id.nodisease + 1)\n \n results <- c(true.positive ,false.negative, false.positive, true.negative)\n data.reshape <- data.frame(results, group.id1 , group.id2, study.id)\n \n # need to change after updated to lme4 1.0.5 from lme4 0.99\n # model <- lmer( results ~ 0 + group.id1 + group.id2 + \n # ( 0 + group.id1 + group.id2 |study.id) ,\n # family = binomial , data = 
data.reshape, nAGQ = 3)\n model <- glmer( results ~ 0 + group.id1 + group.id2 + \n ( 0 + group.id1 + group.id2 |study.id) ,\n family = binomial , data = data.reshape, nAGQ = 1) \n\n # same lme4 0.99 --> lme4 1.0.5 issue\n #logit_sens <- model@fixef[1]\n #logist_spec <- model@fixef[2]\n logit_sens <- model@beta[1]\n logist_spec <- model@beta[2]\n\n stde <- coef(summary(model))[, \"Std. Error\"]\n\n se_logit_sens <- stde[1]\n se_logit_spec <- stde[2]\n\n results <- VarCorr(model)\n var_sens <- results$study.id[1,1]\n var_spec <- results$study.id[2,2]\n covar <- results$study.id[2,1]\n correlation <- covar / (var_sens * var_spec)^0.5\n\n meta.results <- data.frame(logit_sens, logist_spec, se_logit_sens, se_logit_spec, \n var_sens, var_spec, covar, correlation)\n\n row.names(meta.results) <- \"estimates\"\n\n meta.results\n}\n\n\n\n## plotting an HSROC based on the bivariate model\nplot.bivariate <- function(meta.results, TP, FP, FN, TN, plot.points=TRUE, \n data.range=TRUE, line.color = \"red\", weights = TRUE, scale = 0.01,\n filepath=\"./r_tmp/bivariate\") {\n slope <- (meta.results[1,5] / meta.results[1,6])^0.5\n intercept <- meta.results[1,1] + meta.results[1,2]*(meta.results[1,5] /meta.results[1,6])^0.5\n\n # note that this assumes counts have been zero-corrected\n sens <- TP / (TP +FN)\n fnr <- 1- TN / (TN + FP)\n total <- TP + FP + FN + TN\n \n if (data.range == TRUE) {\n low = min(fnr)\n high = max(fnr)\n } \n if (data.range == FALSE) {\n low = 0\n high = 1 \n }\n \n if (weights == TRUE) {\n total = total\n scale = scale\n }\n else {\n total = 1\n scale = 1\n }\n \n if (plot.points == TRUE) {\n png(file = paste(filepath, \".png\", sep=\"\"), height=960, width=960)\n plot(sens ~ fnr , asp=1, ylim = c(0,1) , xlim = c(0,1), ylab = \"Sensitivity\", xlab = \"1 - Specificity\", cex = scale*total) \n curve( 1/ ( 1 + exp(-(intercept + slope*log(x/(1-x) ) ) ) ) \n , add = TRUE, xlim = c(low, high) , col = line.color)\n dev.off()\n\n pdf(file = paste(filepath, \".pdf\", sep=\"\"))\n plot(sens ~ fnr , asp=1, ylim = c(0,1) , xlim = c(0,1), ylab = \"Sensitivity\", xlab = \"1 - Specificity\", cex = scale*total) \n curve( 1/ ( 1 + exp(-(intercept + slope*log(x/(1-x) ) ) ) ) \n , add = TRUE, xlim = c(low, high) , col = line.color)\n dev.off()\n } \n if (plot.points == FALSE) {\n png(file = paste(filepath, \".png\", sep=\"\"), height=960, width=960)\n plot(sens ~ fnr , asp=1, ylim = c(0,1) , xlim = c(0,1), ylab = \"Sensitivity\", xlab = \"1 - Specificity\", col = \"white\" ) \n curve( 1/ ( 1 + exp(-(intercept + slope*log(x/(1-x) ) ) ) ), xlim = c(low, high), ylab = \"\", xlab = \"\", add = TRUE, col = line.color )\n dev.off() \n\n pdf(file = \"bivariate_meta.pdf\")\n plot(sens ~ fnr , asp=1, ylim = c(0,1) , xlim = c(0,1), ylab = \"Sensitivity\", xlab = \"1 - Specificity\", col = \"white\" ) \n curve( 1/ ( 1 + exp(-(intercept + slope*log(x/(1-x) ) ) ) ), xlim = c(low, high), ylab = \"\", xlab = \"\", add = TRUE, col = line.color )\n dev.off() \n }\n\n\n}\n \n \n\n\n\n\n" }, { "alpha_fraction": 0.6875447630882263, "alphanum_fraction": 0.7063207626342773, "avg_line_length": 54.816001892089844, "blob_id": "a5b8920898f5ab049177af2350bacd3164f1a150", "content_id": "f38f7977389c7762b71e7713b8060f2cecac3cce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6977, "license_type": "no_license", "max_line_length": 247, "num_lines": 125, "path": "/src/forms/ui_welcome_page.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# 
-*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'welcome_page.ui'\n#\n# Created: Thu Jun 27 10:21:34 2013\n# by: PyQt4 UI code generator 4.10.1\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_WizardPage(object):\n def setupUi(self, WizardPage):\n WizardPage.setObjectName(_fromUtf8(\"WizardPage\"))\n WizardPage.resize(350, 230)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(WizardPage.sizePolicy().hasHeightForWidth())\n WizardPage.setSizePolicy(sizePolicy)\n WizardPage.setMinimumSize(QtCore.QSize(350, 230))\n WizardPage.setMaximumSize(QtCore.QSize(350, 230))\n self.verticalLayout_2 = QtGui.QVBoxLayout(WizardPage)\n self.verticalLayout_2.setObjectName(_fromUtf8(\"verticalLayout_2\"))\n self.horizontalLayout = QtGui.QHBoxLayout()\n self.horizontalLayout.setObjectName(_fromUtf8(\"horizontalLayout\"))\n spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.horizontalLayout.addItem(spacerItem)\n self.verticalLayout = QtGui.QVBoxLayout()\n self.verticalLayout.setObjectName(_fromUtf8(\"verticalLayout\"))\n self.create_new_btn = QtGui.QPushButton(WizardPage)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(_fromUtf8(\":/function_icon_set/function_icon_set/add_48.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.create_new_btn.setIcon(icon)\n self.create_new_btn.setObjectName(_fromUtf8(\"create_new_btn\"))\n self.verticalLayout.addWidget(self.create_new_btn)\n self.import_csv_btn = QtGui.QPushButton(WizardPage)\n icon1 = QtGui.QIcon()\n icon1.addPixmap(QtGui.QPixmap(_fromUtf8(\":/function_icon_set/function_icon_set/box_download_48.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.import_csv_btn.setIcon(icon1)\n self.import_csv_btn.setObjectName(_fromUtf8(\"import_csv_btn\"))\n self.verticalLayout.addWidget(self.import_csv_btn)\n self.open_recent_btn = QtGui.QPushButton(WizardPage)\n icon2 = QtGui.QIcon()\n icon2.addPixmap(QtGui.QPixmap(_fromUtf8(\":/function_icon_set/function_icon_set/folder_48.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.open_recent_btn.setIcon(icon2)\n self.open_recent_btn.setObjectName(_fromUtf8(\"open_recent_btn\"))\n self.verticalLayout.addWidget(self.open_recent_btn)\n self.open_btn = QtGui.QPushButton(WizardPage)\n self.open_btn.setIcon(icon2)\n self.open_btn.setObjectName(_fromUtf8(\"open_btn\"))\n self.verticalLayout.addWidget(self.open_btn)\n self.horizontalLayout.addLayout(self.verticalLayout)\n spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.horizontalLayout.addItem(spacerItem1)\n self.verticalLayout_2.addLayout(self.horizontalLayout)\n spacerItem2 = QtGui.QSpacerItem(20, 9, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)\n self.verticalLayout_2.addItem(spacerItem2)\n self.line = QtGui.QFrame(WizardPage)\n self.line.setFrameShape(QtGui.QFrame.HLine)\n self.line.setFrameShadow(QtGui.QFrame.Sunken)\n 
self.line.setObjectName(_fromUtf8(\"line\"))\n self.verticalLayout_2.addWidget(self.line)\n self.horizontalLayout_2 = QtGui.QHBoxLayout()\n self.horizontalLayout_2.setObjectName(_fromUtf8(\"horizontalLayout_2\"))\n self.oma_onlineLabel = QtGui.QLabel(WizardPage)\n self.oma_onlineLabel.setMinimumSize(QtCore.QSize(120, 0))\n self.oma_onlineLabel.setAlignment(QtCore.Qt.AlignCenter)\n self.oma_onlineLabel.setOpenExternalLinks(True)\n self.oma_onlineLabel.setObjectName(_fromUtf8(\"oma_onlineLabel\"))\n self.horizontalLayout_2.addWidget(self.oma_onlineLabel)\n self.line_3 = QtGui.QFrame(WizardPage)\n self.line_3.setFrameShape(QtGui.QFrame.VLine)\n self.line_3.setFrameShadow(QtGui.QFrame.Sunken)\n self.line_3.setObjectName(_fromUtf8(\"line_3\"))\n self.horizontalLayout_2.addWidget(self.line_3)\n self.how_to_citeLabel = QtGui.QLabel(WizardPage)\n self.how_to_citeLabel.setEnabled(False)\n self.how_to_citeLabel.setMinimumSize(QtCore.QSize(80, 0))\n self.how_to_citeLabel.setAlignment(QtCore.Qt.AlignCenter)\n self.how_to_citeLabel.setOpenExternalLinks(True)\n self.how_to_citeLabel.setObjectName(_fromUtf8(\"how_to_citeLabel\"))\n self.horizontalLayout_2.addWidget(self.how_to_citeLabel)\n self.line_4 = QtGui.QFrame(WizardPage)\n self.line_4.setFrameShape(QtGui.QFrame.VLine)\n self.line_4.setFrameShadow(QtGui.QFrame.Sunken)\n self.line_4.setObjectName(_fromUtf8(\"line_4\"))\n self.horizontalLayout_2.addWidget(self.line_4)\n self.helpLabel = QtGui.QLabel(WizardPage)\n self.helpLabel.setMinimumSize(QtCore.QSize(70, 0))\n self.helpLabel.setAlignment(QtCore.Qt.AlignCenter)\n self.helpLabel.setOpenExternalLinks(True)\n self.helpLabel.setObjectName(_fromUtf8(\"helpLabel\"))\n self.horizontalLayout_2.addWidget(self.helpLabel)\n self.verticalLayout_2.addLayout(self.horizontalLayout_2)\n\n self.retranslateUi(WizardPage)\n QtCore.QMetaObject.connectSlotsByName(WizardPage)\n\n def retranslateUi(self, WizardPage):\n WizardPage.setWindowTitle(_translate(\"WizardPage\", \"WizardPage\", None))\n WizardPage.setTitle(_translate(\"WizardPage\", \"What would you like to do?\", None))\n self.create_new_btn.setText(_translate(\"WizardPage\", \"Create a new Project\", None))\n self.import_csv_btn.setText(_translate(\"WizardPage\", \"Import CSV\", None))\n self.open_recent_btn.setText(_translate(\"WizardPage\", \"open recent ...\", None))\n self.open_btn.setText(_translate(\"WizardPage\", \"Open an existing project\", None))\n self.oma_onlineLabel.setText(_translate(\"WizardPage\", \"<html><head/><body><p><a href=\\\"http://www.cebm.brown.edu/open_meta\\\"><span style=\\\" text-decoration: underline; color:#0000ff;\\\">OpenMeta Website</span></a></p></body></html>\", None))\n self.how_to_citeLabel.setText(_translate(\"WizardPage\", \"<a href=\\\"www.google.com\\\">How to cite</a>\", None))\n self.helpLabel.setText(_translate(\"WizardPage\", \"<a href=\\\"http://tuftscaes.org/open_meta/help/openMA_help.html\\\">Help</a>\", None))\n\nimport icons_rc\n" }, { "alpha_fraction": 0.665484607219696, "alphanum_fraction": 0.6708037853240967, "avg_line_length": 28.675437927246094, "blob_id": "df56428d00c431b3a2f61b5518dda72ff5360ac2", "content_id": "3c23d0d1b59e433b6e638ba6e1dd1cf1fe6492a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 3384, "license_type": "no_license", "max_line_length": 145, "num_lines": 114, "path": "/src/R/openmetar/R/permutation.r", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": 
"############################################################################\n# permuted.ma\n#\n# Description: Performs a permuted (a.k.a. randomized even though it's not \n# \trandom) meta analysis \n#\n# Meta-analysis parameters:\n#\tdata: data frame of the data with columns:\n#\t\tyi: effect size\n#\t\tvi: variance\n#\t\tslab: study labels \n#\tmethod: character string specifying whether a fixed- or a\n#\t\trandom/mixed-effects model should be fitted.\n#\t\tFixed-effects model: \"FE\"\n#\t\tRandom-effects model one of: \"DL\", \"HE\", \"SJ\", \"ML\",\n#\t\t\t\"REML\", \"EB\", or \"HS\"\n#\tlevel: confidence interval level (0-100)\n#\tdigits: # of decimal places to which the printed results should be rounded.\n#\tbtt:\n#\tknha: \n#\n# Permuation parameters:\n#\texact:\n#\titer:\n#\tretpermdist:\n#\tmakepermdisthist:\n#\n############################################################################\npermuted.ma <- function(\n\t# meta-analysis parameters\n\tdata, method, intercept=TRUE, level=95, digits=4, knha=FALSE, weighted=TRUE,\n\t# Permutation parameters\n\texact=FALSE, iter=1000, retpermdist=FALSE) {\n\t\n\tma.res <- rma.uni(yi, vi,\n\t\tintercept=intercept,\n\t\tdata=data,\n\t\tslab=data$slab, \n\t\tmethod=method,\n\t\tknha=knha,\n\t\tlevel=level,\n\t\tdigits=digits,\n\t\tweighted=weighted)\n\n\tperm.res <- permutest(ma.res, exact=exact, iter=iter,\n retpermdist=FALSE, digits=digits)\n\tsummary <- paste(capture.output(perm.res), collapse=\"\\n\")\n\n\tresults <- list(\n\t\t\"Summary\"=summary,\n\t\t\"res\"=perm.res,\n\t\t\"res.info\"=permutest.value.info(retpermdist, meta.reg.mode=FALSE)\n\t\t)\n}\n\npermuted.meta.reg <- function (\n\t# meta-regresion parameters\n\tdata, method, mods, intercept=TRUE, level=95, digits=4, knha=FALSE, btt=NULL,\n\t# Permutation parameters\n\texact=FALSE, iter=1000, retpermdist=FALSE,\n\t# Other parameters\n\tinclude.meta.reg.summary=TRUE # show regular meta regression results too in output\n\t) {\n\n\tmods.str <- make.mods.str(mods)\n\t\n\t# obtain regression result rma.uni\n\treg.res <- regression.wrapper(data, mods.str, method, level, digits, btt)\n\n\tperm.res <- permutest(reg.res, exact=exact, iter=iter,\n retpermdist=retpermdist, digits=digits)\n\tsummary <- paste(capture.output(perm.res), collapse=\"\\n\")\n\n\tresults <- list(\n\t\t\"Permuted Meta-Regression Summary\"=summary,\n\t\t\"res\"=perm.res,\n\t\t\"res.info\"=permutest.value.info(retpermdist)\n\t\t)\n\n\tif (include.meta.reg.summary) {\n\t\tmeta.reg.result <- g.meta.regression(\n\t\t\tdata=data,\n\t\t\tmods=mods,\n\t\t\tmethod=method,\n\t\t\tlevel=level,\n\t\t\tdigits=digits,\n\t\t\tmeasure=NULL,\n\t\t\tbtt=btt,\n\t\t\tmake.coeff.forest.plot=FALSE,\n\t\t\texclude.intercept=FALSE, # For coefficient forest plot\n\t\t\tdisable.plots=TRUE\n\t\t)\n\n\t\tresults <- c(list('Standard Meta Regression Summary'=meta.reg.result$Summary), results)\n\t}\n\n\tresults\n}\n\npermutest.value.info <- function(retpermdist, meta.reg.mode=TRUE) {\n\tinfo = list(\n\t\t\tpval = list(type=\"vector\", description='p-value(s) based on the permutation test.'),\n\t\t\tQMp = list(type=\"vector\", description='p-value for the omnibus test of coefficients based on the permutation test.')\n\t)\n\t\n\tif (retpermdist && meta.reg.mode) {\n\t\tadditional.info = list(\n\t\t\t\tzval.perm = list(type=\"data.frame\", description='values of the test statistics of the coefficients under the various permutations'),\n\t\t\t\tQM.perm = list(type=\"vector\", description='values of the test statistic for the omnibus test of 
coefficients under the various permutations')\n\t\t)\n\t\tinfo = c(info, additional.info)\n\t}\n\treturn(info)\n}\n\n" }, { "alpha_fraction": 0.7041139006614685, "alphanum_fraction": 0.7262658476829529, "avg_line_length": 27.727272033691406, "blob_id": "748b581747c9b446126c88fcdc6c1db8f39d791b", "content_id": "69784bb7ecb77b74aecbdc29cc707c9e08d9fbe2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 632, "license_type": "no_license", "max_line_length": 117, "num_lines": 22, "path": "/src/R/openmetar/man/openmetar-package.Rd", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "\\name{openmetar-package}\n\\alias{openmetar-package}\n\\alias{openmetar}\n\\docType{package}\n\\title{R component for Open Meta-Analyst}\n\\description{openmetar is the R component for Open Meta-Analyst, a graphical user interface (GUI) for meta-analysis. \nThe download package for the GUI is available at http://tuftscaes.org/open_meta/}\n\\details{\n\\tabular{ll}{\nPackage: \\tab openmetar\\cr\nType: \\tab Package\\cr\nVersion: \\tab 1.0\\cr\nDate: \\tab 2010-12-02\\cr\nLicense: \\tab GPL (>= 2)\\cr\nLazyLoad: \\tab yes\\cr\nDepends: \\tab R (>= 2.9.2), grid, igraph, metafor\\cr\n}\n\\author{Author: Byron Wallace <[email protected]>\n\nMaintainer: Paul Trow <[email protected]>\n}\n\\references{}\n" }, { "alpha_fraction": 0.6193771362304688, "alphanum_fraction": 0.625, "avg_line_length": 37.516666412353516, "blob_id": "904acc015510473a586ac3c9d18ae418eb055f18", "content_id": "f81cb51b6944564e6a1f1f60e91a3ac27b3d3fd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2312, "license_type": "no_license", "max_line_length": 90, "num_lines": 60, "path": "/src/prelaunch.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# NOTE: THIS SCRIPT is adapted FROM site-packages/cx_Freeze/initscripts/Console.py\n#------------------------------------------------------------------------------\n# Console.py\n# Initialization script for cx_Freeze which manipulates the path so that the\n# directory in which the executable is found is searched for extensions but\n# no other directory is searched. 
It also sets the attribute sys.frozen so that\n# the Win32 extensions behave as expected.\n#------------------------------------------------------------------------------\n\ndef set_rpy2_env_variables():\n print (\"DIR_NAME: %s\" % DIR_NAME)\n\n # assumes R is in the Contents/Frameworks directory of the app\n #contents_app_dir = os.path.dirname(DIR_NAME)\n #R_HOME_DIR = os.path.join(contents_app_dir,'Frameworks','R.framework','Resources')\n R_HOME_DIR = os.path.join(DIR_NAME,'R')\n \n renv_var = { # environment variable settings\n 'R_HOME': R_HOME_DIR,\n 'R_SHARE_DIR': os.path.join(R_HOME_DIR,'share'),\n 'R_INCLUDE_DIR': os.path.join(R_HOME_DIR,'include'),\n 'R_DOC_DIR': os.path.join(R_HOME_DIR,'doc'),\n 'R_ARCH': '',\n }\n preferred_keyorder = ['R_HOME', 'R_SHARE_DIR', 'R_INCLUDE_DIR', 'R_DOC_DIR', 'R_ARCH']\n print(\"Setting rpy2/R environment variables as follows: %s\" % renv_var)\n\n for environment_variable,value in renv_var.items():\n os.environ[environment_variable] = value\n\n for environment_variable in renv_var.keys():\n print(\"%s is now %s\" % (environment_variable,os.environ[environment_variable]))\n\n\nimport os\nimport sys\nimport zipimport\n\nsys.frozen = True\nsys.path = sys.path[:4]\n\nos.environ[\"TCL_LIBRARY\"] = os.path.join(DIR_NAME, \"tcl\")\nos.environ[\"TK_LIBRARY\"] = os.path.join(DIR_NAME, \"tk\")\nset_rpy2_env_variables()\n\nm = __import__(\"__main__\")\nimporter = zipimport.zipimporter(INITSCRIPT_ZIP_FILE_NAME)\nif INITSCRIPT_ZIP_FILE_NAME != SHARED_ZIP_FILE_NAME:\n moduleName = m.__name__\nelse:\n name, ext = os.path.splitext(os.path.basename(os.path.normcase(FILE_NAME)))\n moduleName = \"%s__main__\" % name\ncode = importer.get_code(moduleName)\nexec(code, m.__dict__)\n\nversionInfo = sys.version_info[:3]\nif versionInfo >= (2, 5, 0) and versionInfo <= (2, 6, 4):\n module = sys.modules.get(\"threading\")\n if module is not None:\n module._shutdown()\n\n" }, { "alpha_fraction": 0.6734567880630493, "alphanum_fraction": 0.6790123581886292, "avg_line_length": 49.5625, "blob_id": "ee7425a11c1e932ba8233df802b124305ae293b7", "content_id": "5ce4428c7cb74771cece109d562690cd380447cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1620, "license_type": "no_license", "max_line_length": 401, "num_lines": 32, "path": "/doc/starting.html", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "<html>\n<head>\n<link href=\"css/help.css\" \ntype=text/css rel=stylesheet>\n</head>\n<body>\n <h2><a id=\"top\">Starting Open Meta-Analyst</a></h2>\n <p>To start Open Meta-Analyst, open the folder where you \n extracted the Open Meta-Analyst program files and double-click the file\n <p><img src=\"images/OpenMetaAnalyst_icon.png\"></p>\n <p>This opens the Welcome to OpenMeta dialog, as shown below.</p> \n <p><img \n src=\"images/welcome.png\"></p> \n \n <p>In the dialog, you can create a new data set or open an existing data set. 
\n As an example, open the sample data set amino.oma by the following steps:</p>\n <ol>\n <li>Click the <b>open</b> button.</li>\n <li>In the Open File dialog, double-click the <b>sample_data</b> directory.</li>\n <li>Select the file amino.oma.</li>\n <li>Click <b>Open</b>.</li>\n </ol>\n <p>This opens the main Open Meta-Analyst window, as shown below.</p>\n <p><img src=\"images/main_window.png\"></p>\n <p>The next section describes the <a href=\"main_window.html\">Open Meta-Analyst window</a> in detail.</p>\n <p><b>Tip:</b> You can create a shortcut to Open Meta-Analyst by dragging the OpenMetaAnalyst.exe icon onto the desktop.</p>\n<p><a href=\"#top\">Back to top</a></p>\n<br>\n <table id=\"nav\" cellspacing=\"0\"><tr valign=\"center\"><td align=\"left\" width=\"20\"><a href=\"introduction.html#top\"><img src=\"images/b_prev.gif\" border=\"0\"></a></td><td align=\"left\">Introducing Open Meta-Analyst</td><td>&nbsp;</td><td align=\"right\">The Open Meta-Analyst Window</td><td align=\"right\" width=\"20\"><a href=\"main_window.html#top\"><img src=\"images/b_next.gif\" border=\"0\"></a></td></tr></table>\n<br>\n</body>\n </html>\n " }, { "alpha_fraction": 0.5788128972053528, "alphanum_fraction": 0.5811152458190918, "avg_line_length": 41.045745849609375, "blob_id": "ecdc2a2f16063aec3d8da6fbe1261f267afd5641", "content_id": "2e47f0be3556832d0f2f9771937d7280c77009df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 39524, "license_type": "no_license", "max_line_length": 165, "num_lines": 940, "path": "/src/ma_data_table_view.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "#############################################################\n# Byron C. Wallace #\n# George Dietz #\n# CEBM @ Brown #\n# OpenMeta(analyst) #\n# #\n# #\n# Custom QTableView, implements copy/paste and undo/redo. #\n#############################################################\n\nprint(\"In ma_data_table_view: Importing pyqt4 stuff\")\nfrom PyQt4 import QtCore, QtGui\nfrom PyQt4.Qt import *\n\nprint(\"In ma_data_table_view: importing forms\")\nimport binary_data_form\nimport continuous_data_form\nimport diagnostic_data_form\n\n# it's a questionable practice to import the\n# underlying model into the view, but sometimes\n# it's easiest to manipulate the model directly\n# on interaction rather than that table_model\n# intermediary \nimport ma_dataset\nfrom ma_dataset import *\nfrom meta_globals import *\n\n# for issue #169 -- normalizing new lines, e.g., for pasting\n# use QRegExp to manipulate QStrings (rather than re)\n_newlines_re = QRegExp('(\\r\\n|\\r|\\r)')\n\n\ndef DebugHelper(function):\n def _DebugHelper(*args, **kw):\n print(\"Entered %s\" % function.func_name)\n res = function(*args, **kw)\n print(\"Left %s\" % function.func_name)\n return res\n return _DebugHelper\n\nclass MADataTable(QtGui.QTableView):\n\n def __init__(self, parent=None):\n QWidget.__init__(self, parent)\n \n # the main gui is assumed to be the form\n # that owns this table view, i.e., the 'main'\n # user interface/form. 
it is assumed that this\n # is set elsewhere.\n self.main_gui = None\n \n # None maps to the special, no outcome/no follow up\n # undo stack\n self.undo_stack_dict = {None:QUndoStack(self)}\n self.undoStack = QUndoStack(self)\n\n header = self.horizontalHeader()\n self.connect(header, SIGNAL(\"sectionClicked(int)\"), self.header_clicked)\n\n self.vert_header = self.verticalHeader()\n \n self.connect(self.vert_header, SIGNAL(\"sectionClicked(int)\"),\n self.row_header_clicked)\n \n ## TODO need to add covariate indices here, as needed\n self.reverse_column_sorts = {0: False, 1: False}\n self.setAlternatingRowColors(True)\n\n ### horizontal (row) header\n self.contextMenuEvent = self._make_context_menu()\n\n ### vertical (column) header\n headers = self.horizontalHeader()\n headers.setContextMenuPolicy(Qt.CustomContextMenu)\n headers.customContextMenuRequested.connect(self.header_context_menu)\n\n def _make_context_menu(self):\n def _context_menu(event):\n context_menu = QMenu(self)\n study_index = self.rowAt(event.y())\n\n ### if this is a dummy row, it doesn't make\n # sense to provide a context-menu\n if study_index >= len(self.model().dataset.studies):\n return None\n\n ### delete study\n study = self.model().dataset.studies[study_index]\n action = QAction(\"delete study %s\" % study.name, self)\n QObject.connect(action, SIGNAL(\"triggered()\"), \\\n lambda : self.main_gui.delete_study(study, study_index=study_index))\n context_menu.addAction(action)\n\n ### copy\n action = QAction(\"copy\", self)\n QObject.connect(action, SIGNAL(\"triggered()\"), self.copy)\n context_menu.addAction(action)\n\n ### paste\n action = QAction(\"paste\", self)\n QObject.connect(action, SIGNAL(\"triggered()\"), self.paste)\n context_menu.addAction(action)\n\n pos = event.globalPos()\n context_menu.popup(pos)\n event.accept()\n\n return _context_menu\n\n def header_context_menu(self, pos):\n '''\n here is where the context menus for column header\n right-clicks are built.\n '''\n column_clicked = self.columnAt(pos.x())\n covariate_columns = self.get_covariate_columns()\n raw_data_columns = self.model().RAW_DATA\n outcomes_columns = self.model().OUTCOMES\n\n sort_by_col = self.model().get_current_outcome_type()\n data_type = self.model().get_current_outcome_type()\n\n print \"right click @ column: %s\" % column_clicked\n context_menu = QMenu(self)\n\n # add a covariate anywhere\n if column_clicked == 0:\n # option to (de-)select / include all studies\n # per Ethan (issue #100)\n action = QAction(\"include all\", self)\n QObject.connect(action, SIGNAL(\"triggered()\"), self.include_all_studies)\n if self.model().all_studies_are_included():\n action.setEnabled(False)\n context_menu.addAction(action)\n\n action = QAction(\"exclude all\", self)\n QObject.connect(action, SIGNAL(\"triggered()\"), self.exclude_all_studies)\n if self.model().all_studies_are_excluded():\n action.setEnabled(False)\n context_menu.addAction(action)\n\n context_menu.popup(self.mapToGlobal(pos))\n elif column_clicked in (1,2):\n col_name = {1:\"study name\", 2:\"year\"}[column_clicked]\n action_sort = QAction(\"sort studies by %s\" % col_name, self)\n \n QObject.connect(action_sort, SIGNAL(\"triggered()\"), lambda: self.sort_by_col(column_clicked))\n context_menu.addAction(action_sort)\n\n elif column_clicked in raw_data_columns and not data_type == \"diagnostic\":\n corresponding_tx_group = self.model().current_txs[0]\n if data_type == \"binary\":\n if column_clicked in raw_data_columns[2:]:\n corresponding_tx_group = 
self.model().current_txs[1]\n elif data_type == \"continuous\":\n if column_clicked in raw_data_columns[3:]:\n corresponding_tx_group = self.model().current_txs[1]\n \n #renaming\n action_rename = QAction(\"rename group %s...\" % corresponding_tx_group, self)\n QObject.connect(action_rename, SIGNAL(\"triggered()\"),\n lambda : self.main_gui.edit_group_name(corresponding_tx_group))\n context_menu.addAction(action_rename)\n # sorting\n col_name = self.model().headerData(column_clicked, Qt.Horizontal).toString()\n action_sort = QAction(\"sort studies by %s\" % col_name, self)\n QObject.connect(action_sort, SIGNAL(\"triggered()\"),\n lambda : self.sort_by_col(column_clicked))\n context_menu.addAction(action_sort)\n elif column_clicked in raw_data_columns and data_type == \"diagnostic\":\n # sorting\n col_name = self.model().headerData(column_clicked, Qt.Horizontal).toString()\n action_sort = QAction(\"sort studies by %s\" % col_name, self)\n QObject.connect(action_sort, SIGNAL(\"triggered()\"),\n lambda : self.sort_by_col(column_clicked))\n context_menu.addAction(action_sort)\n elif column_clicked in outcomes_columns:\n # sorting\n col_name = self.model().headerData(column_clicked, Qt.Horizontal).toString()\n action_sort = QAction(\"sort studies by %s\" % col_name, self)\n QObject.connect(action_sort, SIGNAL(\"triggered()\"),\n lambda : self.sort_by_col(column_clicked))\n context_menu.addAction(action_sort)\n elif column_clicked in covariate_columns:\n cov = self.model().get_cov(column_clicked)\n\n # and for sorting (issue #142)\n action_sort = QAction(\"sort studies by %s\" % cov.name, self)\n QObject.connect(action_sort, SIGNAL(\"triggered()\"),\n lambda : self.sort_by_col(column_clicked))\n context_menu.addAction(action_sort)\n\n action_ren = QAction(\"rename covariate %s\" % cov.name, self)\n QObject.connect(action_ren, SIGNAL(\"triggered()\"),\n lambda : self.main_gui.rename_covariate(cov))\n context_menu.addAction(action_ren)\n\n # allow deletion of covariate\n action_del = QAction(\"delete covariate %s\" % cov.name, self)\n QObject.connect(action_del, SIGNAL(\"triggered()\"),\n lambda : self.main_gui.delete_covariate(cov))\n context_menu.addAction(action_del)\n\n convert_to_str = \"*continuous*\"\n if cov.data_type == CONTINUOUS:\n convert_to_str = \"*factor*\"\n\n action_change = QAction(\"create a %s copy of %s\" % (convert_to_str, cov.name), self)\n QObject.connect(action_change, SIGNAL(\"triggered()\"),\n lambda : self.main_gui.change_cov_type(cov))\n context_menu.addAction(action_change)\n \n\n context_menu.popup(self.mapToGlobal(pos))\n\n def include_all_studies(self):\n self.model().include_all_studies()\n self.model().reset()\n\n def exclude_all_studies(self):\n self.model().exclude_all_studies()\n self.model().reset()\n\n def keyPressEvent(self, event): \n if (event.modifiers() & QtCore.Qt.ControlModifier):\n ## undo/redo\n\n if event.key() == QtCore.Qt.Key_Z:\n self.undoStack.undo()\n elif event.key() == QtCore.Qt.Key_Y:\n self.undoStack.redo()\n ### copy/paste\n elif event.key() == QtCore.Qt.Key_C:\n # ctrl + c = copy\n self.copy()\n elif event.key() == QtCore.Qt.Key_V:\n # ctrl + v = paste\n self.paste()\n else:\n ###\n # if the command hasn't anything to do with the table view\n # in particular, we pass the event up to the main UI\n self.main_gui.keyPressEvent(event)\n else:\n # fix for issue #180\n #if event.key() == QtCore.Qt.Key_Tab:\n # check to see if the next cell is\n # an outcome cell; if it is, treat\n # this like an enter, instead of a tab.\n \n\n\n ### \n # This 
is a call to the default keyPressEvent function,\n # which we are here overwriting, thereby eliminating\n # many of the annoying properties (no tab navigation; double\n # click editing only) that have been brought up/reported\n # as bugs. See issues: #21, #19\n # \n QTableView.keyPressEvent(self, event)\n\n \n \n def copy(self):\n # copy/paste: these only happen if at least one cell is selected\n selected_indexes = self.selectionModel().selectedIndexes()\n upper_left_index = self._upper_left(selected_indexes)\n lower_right_index = self._lower_right(selected_indexes) \n self.copy_contents_in_range(upper_left_index, lower_right_index,\n to_clipboard=True) \n \n def paste(self):\n # copy/paste: these only happen if at least one cell is selected\n selected_indexes = self.selectionModel().selectedIndexes()\n upper_left_index = self._upper_left(selected_indexes)\n lower_right_index = self._lower_right(selected_indexes)\n\n self.paste_from_clipboard(upper_left_index) \n self._enable_analysis_menus_if_appropriate()\n \n def row_header_clicked(self, row):\n if row > len(self.model().dataset) - 1:\n return\n \n # fix for issue # 184\n self.vert_header.blockSignals(True)\n \n # dispatch on the data type\n form = None\n study_index = row\n # fix for issue # 183\n ma_unit = copy.deepcopy(self.model().get_current_ma_unit_for_study(study_index))\n old_ma_unit = copy.deepcopy(ma_unit)\n cur_txs = self.model().current_txs\n cur_effect = self.model().current_effect\n cur_group_str = self.model().get_cur_group_str()\n data_type = self.model().get_current_outcome_type()\n\n ####\n # here we implement undo/redo.\n # in particular, we cache the raw data prior to editing;\n # then undo will simply overwrite the new raw data\n if data_type == \"binary\":\n ### need to back up\n cur_raw_data_dict = {}\n for group in cur_txs:\n cur_raw_data_dict[group] = list(ma_unit.get_raw_data_for_group(group))\n \n form = binary_data_form.BinaryDataForm2(ma_unit, cur_txs, cur_group_str, cur_effect, conf_level=self.model().get_global_conf_level(), parent=self)\n if form.exec_():\n # push the edit even\n ma_edit = CommandEditMAUnit(self, study_index, ma_unit, old_ma_unit)\n self.undoStack.push(ma_edit)\n elif data_type == \"continuous\":\n cur_raw_data_dict = {}\n for group_name in cur_txs:\n cur_raw_data_dict[group_name] = list(ma_unit.get_raw_data_for_group(group_name))\n \n #old_raw_data_dict = copy.deepcopy(cur_raw_data_dict)\n form = continuous_data_form.ContinuousDataForm(ma_unit, cur_txs, cur_group_str, cur_effect, conf_level=self.model().get_global_conf_level(), parent=self)\n if form.exec_():\n # update the model; push this event onto the stack\n ma_edit = CommandEditMAUnit(self, study_index, ma_unit, old_ma_unit)\n self.undoStack.push(ma_edit)\n else:\n # then this is diagnostic data\n cur_raw_data_dict = {}\n for group in cur_txs:\n cur_raw_data_dict[group] = list(ma_unit.get_raw_data_for_group(group))\n\n form = diagnostic_data_form.DiagnosticDataForm(ma_unit, cur_txs, cur_group_str, conf_level=self.model().get_global_conf_level(), parent=self)\n if form.exec_():\n ma_edit = CommandEditMAUnit(self, study_index, ma_unit, old_ma_unit)\n self.undoStack.push(ma_edit)\n self.vert_header.blockSignals(False)\n\n def rowMoved(self, row, oldIndex, newIndex):\n pass\n\n def displayed_ma_changed(self):\n cur_outcome = self.model().current_outcome\n cur_follow_up = self.model().current_time_point\n\n def cell_content_changed(self, index, old_val, new_val, study_added): \n # Only make a cell edit if the old values and new values 
are different\n try:\n print(\"Old val: %s, new val: %s\" % (unicode(old_val.toString()), unicode(new_val.toString())))\n except AttributeError:\n print(\"old val: %s, new val: %s\" % (str(old_val), str(new_val)))\n \n if not self._new_eq_old(old_val, new_val):\n cell_edit = CommandCellEdit(self, index, old_val, new_val,\n added_study=study_added)\n self.undoStack.push(cell_edit)\n self._enable_analysis_menus_if_appropriate()\n \n # make analysis menus change even when checkbox is (un)checked\n self._enable_analysis_menus_if_appropriate()\n \n def _new_eq_old(self, old, new):\n '''None and \"\" are the same. Assume old and new are QVariants'''\n \n blank_vals = meta_globals.EMPTY_VALS\n \n # transform into normal string:\n if old is not None:\n #old = str(old.toString())\n old = unicode(old.toString())\n if new is not None:\n #new = str(new.toString())\n new = unicode(new.toString())\n \n if old in blank_vals and new in blank_vals:\n return True\n \n return old==new\n \n \n def change_metric_if_appropriate(self):\n '''\n if: \n 1) there are at least 2 studies, and \n 2) none of them have data for two-arms, and,\n 3) the current metric is a two-arm metric\n then:\n we automatically change the metric to single-arm\n\n returns a tuple, wherein the first element is a boolean\n indicating whether or not the metric was indeed changed,\n and the second is the original metric\n '''\n original_metric = self.model().current_effect\n \n if len(self.model().dataset) > 2:\n data_type = self.model().get_current_outcome_type()\n if data_type == \"binary\" or data_type == \"continuous\":\n default_metric = {\"binary\":BINARY_ONE_ARM_METRICS[0], \n \"continuous\":CONTINUOUS_ONE_ARM_METRICS[0]}[data_type]\n \n if default_metric != original_metric and self.model().data_for_only_one_arm():\n self.set_metric_in_ui(default_metric)\n return (True, original_metric)\n return (False, original_metric)\n\n\n def get_covariate_columns(self):\n return range(self.model().OUTCOMES[-1]+1, self.model().columnCount())\n\n def header_clicked(self, column):\n can_sort_by = [self.model().NAME, self.model().YEAR]\n ## plus we can sort by any covariates, which correspond to those columns that are\n # beyond the last outcome\n covariate_columns = self.get_covariate_columns()\n can_sort_by.extend(covariate_columns)\n\n def sort_by_col(self, column):\n # if a covariate column was clicked, it may not yet have an entry in the\n # reverse_column_sorts dictionary; thus we insert one here\n #\n # @TODO this should *not* use the column number as the key!\n # rather, it should use the name -- the column number of a given\n # covariate might change (e.g., if another covariate is deleted)\n if not self.reverse_column_sorts.has_key(column):\n self.reverse_column_sorts[column] = False\n sort_command = CommandSort(self.model(), column, self.reverse_column_sorts[column])\n self.undoStack.push(sort_command)\n self.reverse_column_sorts[column] = not self.reverse_column_sorts[column]\n\n def _normalize_newlines(self, qstr_text):\n return qstr_text.replace(_newlines_re, \"\\n\")\n\n def paste_from_clipboard(self, upper_left_index):\n ''' pastes the data in the clipboard starting at the currently selected cell.'''\n\n clipboard = QApplication.clipboard()\n clipboard_text = clipboard.text()\n\n # fix for issue #169.\n # excel for mac, insanely, appends \\r instead of\n # \\n for new lines (rows).\n clipboard_text = self._normalize_newlines(clipboard_text)\n\n new_content = self._str_to_matrix(clipboard_text)\n\n # fix for issue #64. 
excel likes to append a blank row\n # to copied data -- we drop that here\n if self._is_blank_row(new_content[-1]):\n new_content = new_content[:-1]\n\n lower_row = upper_left_index.row() + len(new_content)\n lower_col = upper_left_index.column() + len(new_content[0])\n print \"lower row: %s, lower col: %s\" % (lower_row, lower_col)\n num_studies_pre_paste = len(self.model().dataset)\n studies_pre_paste = list(self.model().dataset.studies)\n lower_right_index = self.model().createIndex(lower_row-1, lower_col-1)\n old_content = self._str_to_matrix(self.copy_contents_in_range(upper_left_index, lower_right_index, to_clipboard=False))\n \n print \"old content: %s\" % old_content\n print \"new content: %s\" % new_content\n print \"upper left index:\"\n print self._print_index(upper_left_index)\n\n paste_command = CommandPaste(self, new_content, old_content,\n upper_left_index, studies_pre_paste,\n self.column_widths(), \"paste %s\" % new_content)\n self.undoStack.push(paste_command)\n\n def copy_contents_in_range(self, upper_left_index, lower_right_index, to_clipboard):\n '''\n copy the (textual) content of the cells in provided cell_range -- the copied contents will be\n cast to python Unicode strings and returned. If the to_clipboard flag is true, the contents will\n also be copied to the system clipboard\n '''\n print \"upper left index: %s, upper right index: %s\" % \\\n (self._print_index(upper_left_index), self._print_index(lower_right_index))\n text_matrix = []\n\n # +1s are because range() is right interval exclusive\n for row in range(upper_left_index.row(), lower_right_index.row()+1):\n current_row = []\n for col in range(upper_left_index.column(), lower_right_index.column()+1):\n cur_index = self.model().createIndex(row, col)\n cur_data = self.model().data(cur_index)\n if cur_data is not None:\n # this looks redundant, but in fact the toString method\n # converts the string into a QString\n cur_str = str(cur_data.toString())\n current_row.append(cur_str)\n else:\n current_row.append(\"\")\n text_matrix.append(current_row)\n\n copied_str = self._matrix_to_str(text_matrix)\n\n if to_clipboard:\n clipboard = QApplication.clipboard()\n clipboard.setText(copied_str)\n print \"copied str: %s\" % copied_str\n return copied_str\n\n def paste_contents(self, upper_left_index, source_content):\n '''\n paste the content in source_content into the matrix starting at the upper_left_coord\n cell. 
new rows will be added as needed; existing data will be overwritten\n '''\n origin_row, origin_col = upper_left_index.row(), upper_left_index.column()\n\n if isinstance(source_content[-1], QtCore.QStringList) and \\\n len(str(source_content[-1].join(\" \")))==0:\n # then there's a blank line; Excel has a habit\n # of appending blank lines (\\ns) to copied\n # text -- we get rid of it here\n source_content = source_content[:-1]\n\n # temporarily disable sorting to prevent automatic sorting of pasted data.\n # (note: this is consistent with Excel's approach.)\n self.model().blockSignals(True)\n\n for src_row in range(len(source_content)):\n # do we need to append a row?\n cur_row_count = self.model().rowCount()\n if cur_row_count <= origin_row + src_row:\n self._add_new_row()\n \n for src_col in range(len(source_content[0])):\n try:\n # note that we treat all of the data pasted as\n # one event; i.e., when undo is called, it undos the\n # whole paste\n index = self.model().createIndex(origin_row+src_row, origin_col+src_col)\n self.model().setData(index, QVariant(source_content[src_row][src_col]))\n except Exception, e:\n print \"whoops, exception while pasting: %s\" % e\n\n self.model().blockSignals(False)\n self.model().reset()\n\n def set_data_in_model(self, index, val):\n self.model().setData(index, val)\n self.model().reset()\n\n def column_widths(self):\n ''' returns the current column widths '''\n return [self.columnWidth(col_index) for col_index in range(self.model().columnCount())]\n\n def set_column_widths(self, widths):\n for col_index, width in enumerate(widths):\n self.setColumnWidth(col_index, width)\n\n\n def set_metric_in_ui(self, metric):\n '''\n calls the method on the UI to change\n the current metric -- this is the same\n method binded to the menu items, so call\n this to programmatically change the metric.\n '''\n menu = self.main_gui.oneArmMetricMenu\n if metric in TWO_ARM_METRICS:\n menu = self.main_gui.twoArmMetricMenu\n self.main_gui.metric_selected(metric, menu)\n\n def _enable_analysis_menus_if_appropriate(self):\n \n if len(self.model().dataset) >= 2 and self._get_number_of_included_studies() >= 2: #TODO add condition that there are at least two studies included\n self.main_gui.enable_menu_options_that_require_dataset()\n else:\n self.main_gui.disable_menu_options_that_require_dataset()\n \n def _get_number_of_included_studies(self):\n studies = self.model().dataset.studies\n num_included = 0\n for study in studies:\n if study.include and (not study.manually_excluded):\n num_included += 1\n print(\"included: %s, manually excluded: %s\" % (str(study.include), str(study.manually_excluded)))\n print(\"num included: %d\" % num_included)\n return num_included\n\n def _print_index(self, index):\n print \"(%s, %s)\" % (index.row(), index.column())\n\n def _add_new_row(self):\n '''\n add a new row to the dataTable; note that we briefly toggle sorting off so the row\n is beneath the existing rows.\n '''\n model = self.model()\n cur_row_count = model.rowCount()\n model.insertRow(cur_row_count)\n\n def _str_to_matrix(self, text, col_delimiter=\"\\t\", row_delimiter=\"\\n\"):\n ''' transforms raw text (e.g., from the clipboard) to a structured matrix '''\n m = []\n rows = text.split(row_delimiter)\n for row in rows:\n cur_row = row.split(col_delimiter)\n m.append(cur_row)\n return m\n\n def _print_row(self, r):\n print \"length of row: %s\" % len(r)\n for x in r:\n print x == \"\"\n print \"%s,\" % x\n print \"\\n\"\n\n def _is_blank_row(self, r):\n return len(r) == 1 and r[0] 
== \"\"\n\n def _matrix_to_str(self, m, col_delimiter=\"\\t\", row_delimiter=\"\\n\", append_new_line =False):\n ''' takes a matrix of data (i.e., a nested list) and converts to a string representation '''\n m_str = []\n for row in m:\n m_str.append(col_delimiter.join(row))\n return_str = row_delimiter.join(m_str)\n if append_new_line:\n return_str += row_delimiter\n return return_str\n\n def _upper_left(self, indexes):\n ''' returns the upper most index object in the indexes list.'''\n if len(indexes) > 0:\n upper_left = indexes[0]\n for index in indexes[1:]:\n if index.row() < upper_left.row() or index.column() < upper_left.column():\n upper_left = index\n return upper_left\n return None\n\n def _lower_right(self, indexes):\n if len(indexes) > 0:\n lower_right = indexes[0]\n for index in indexes[1:]:\n if index.row() > lower_right.row() or index.column() > lower_right.column():\n lower_right = index\n return lower_right\n return None\n\n def _add_studies_if_necessary(self, upper_left_index, content):\n '''\n if there are not enough studies to contain the content, this will \n add them.\n '''\n origin_row, origin_col = upper_left_index.row(), upper_left_index.column()\n num_existing_studies = len(self.model().dataset)\n\n num_to_add = len(content) - num_existing_studies - origin_row\n\n last_id = -1\n for i in range(num_to_add):\n # first let's give this a default study name, in case\n # none has been provided\n tmp_study_name = \"study %s\" % (num_existing_studies + i)\n study_index = self.model().createIndex(num_existing_studies + i, self.model().NAME)\n study_id = self.model().dataset.max_study_id()+1\n new_study = Study(study_id)\n self.model().dataset.add_study(new_study)\n\n # now append a blank study if studies were added.\n if num_to_add > 0:\n new_study = Study(self.model().dataset.max_study_id()+1)\n # ah! fix for issue #171. stupidly, I was not previously\n # excluding 'blank' studies appended here..\n new_study.include = False\n self.model().dataset.add_study(new_study)\n self.model().dataset.study_auto_added = int(new_study.id)\n\n self.model().reset()\n\nclass CommandCellEdit(QUndoCommand):\n '''\n Here we make use of QT's undo/redo framework. 
This is an UndoCommand for individual\n cell edits (as opposed to paste actions, which are represented by CommandPaste objects,\n defined below).\n '''\n def __init__(self, ma_data_table_view, index, original_content, new_content, \n ####metric_changed=False, old_metric=None, new_metric=None, # DELETE IF ALL IS WELL\n added_study=None,\n description=\"\"):\n super(CommandCellEdit, self).__init__(description)\n self.first_call = True\n if original_content == None:\n self.original_content = QVariant(QString(\"\"))\n else:\n self.original_content = original_content\n self.new_content = new_content\n self.row, self.col = index.row(), index.column()\n self.ma_data_table_view = ma_data_table_view\n self.added_study = added_study\n self.something_else = added_study\n \n #### output for debugging\n debug_params = dict(first_call = True,\n original_content = original_content,\n new_content = new_content,\n row = index.row(),\n col = index.column(),\n ma_data_table_view = ma_data_table_view,\n added_study = added_study,\n something_else = added_study,\n )\n\n print(\"CommandCellEdit created with parameters: %s\" % str(debug_params))\n #### end debugging output\n\n @DebugHelper\n def redo(self):\n index = self._get_index()\n \n if self.first_call:\n self.first_call = False\n ###\n # the self.added_study should be true if and only if\n # the event being done *caused* a study to be added.\n # in this case, we'll need to remove the added study\n # on the undo() call\n \n\n #self.added_study = self.ma_data_table_view.model().study_auto_added\n # note: previously (10/14/11) there was a call here to set the\n # model's study_auto_added field to None. I don't know why it was\n # here, and removed it. \n # > self.ma_data_table_view.model().study_auto_added = None\n else:\n model = self.ma_data_table_view.model()\n # here we block signals from the model. 
this is\n # to prevent memory access problems on the c\n # side of things, when the model emits\n # the data edited signal.\n model.blockSignals(True)\n model.setData(index, self.new_content)\n self.added_study = self.ma_data_table_view.model().study_auto_added\n self.ma_data_table_view.model().study_auto_added = None\n\n model.blockSignals(False)\n # make the view reflect the update\n self.ma_data_table_view.model().reset()\n \n self.ma_data_table_view._enable_analysis_menus_if_appropriate()\n self.ma_data_table_view.resizeColumnsToContents()\n\n # let everyone know that the data is dirty\n self.ma_data_table_view.emit(SIGNAL(\"dataDirtied()\"))\n\n @DebugHelper\n def undo(self):\n # in this case, the original editing action\n # had the effect of appending a row to the spreadsheet.\n # here we remove it.\n if self.added_study is not None:\n self.ma_data_table_view.model().remove_study(self.added_study)\n\n index = self._get_index()\n model = self.ma_data_table_view.model()\n\n # as in the redo method, we block signals before\n # editing the model data\n model.blockSignals(True)\n model.setData(index, self.original_content, allow_empty_names=True)\n\n model.blockSignals(False)\n self.ma_data_table_view.model().reset()\n\n \n # here is where we check if there are enough studies to actually\n # perform an analysis.\n self.ma_data_table_view._enable_analysis_menus_if_appropriate()\n self.ma_data_table_view.resizeColumnsToContents()\n self.ma_data_table_view.emit(SIGNAL(\"dataDirtied()\"))\n \n def _get_index(self):\n return self.ma_data_table_view.model().createIndex(self.row, self.col)\n\n \nclass CommandPaste(QUndoCommand):\n '''\n We again make use of QT's undo/redo framework. this implementation handles the paste action;\n the redo is just repasting the former contents into the same cells.\n '''\n def __init__(self, ma_data_table_view, new_content, old_content,\\\n upper_left_coord, old_studies, old_col_widths, description):\n super(CommandPaste, self).__init__(description)\n self.new_content, self.old_content = new_content, old_content\n self.upper_left_coord = upper_left_coord\n self.old_column_widths = old_col_widths\n self.ma_data_table_view = ma_data_table_view\n self.added_study = None\n self.metric_changed = None\n self.old_metric = None\n self.new_metric = None\n # is this the first time? \n self.first_call = True\n\n print(\"CommandPaste created\")\n \n @DebugHelper\n def redo(self):\n # cache the original dataset\n self.original_dataset = copy.deepcopy(self.ma_data_table_view.model().dataset)\n self.original_state_dict = copy.copy(self.ma_data_table_view.model().get_stateful_dict())\n\n # paste the data\n self.ma_data_table_view._add_studies_if_necessary(self.upper_left_coord, self.new_content)\n self.ma_data_table_view.paste_contents(self.upper_left_coord, self.new_content)\n\n if self.first_call:\n # on the first application of the paste, we need to ascertain\n # whether the metric changed automatically (e.g., because it\n # looks like tpasted data is single-arm)\n self.metric_changed, self.old_metric = \\\n self.ma_data_table_view.change_metric_if_appropriate()\n\n if self.metric_changed:\n self.new_metric = self.ma_data_table_view.model().current_effect\n #self.ma_data_table_view.set_metric_in_ui(self.new_metric)\n self.first_call = False\n else:\n # did the metric change on the original paste? 
\n # if so re-change it here\n if self.metric_changed is not None:\n self.ma_data_table_view.set_metric_in_ui(self.new_metric)\n\n self.ma_data_table_view.model().reset()\n self.ma_data_table_view._enable_analysis_menus_if_appropriate()\n self.ma_data_table_view.emit(SIGNAL(\"dataDirtied()\"))\n self.ma_data_table_view.resizeColumnsToContents()\n\n @DebugHelper\n def undo(self):\n if self.added_study is not None:\n self.ma_data_table_view.model().remove_study(self.added_study)\n self.ma_data_table_view.main_gui.set_model(self.original_dataset,\n state_dict=self.original_state_dict)\n\n\n # did we change the metric automatically (e.g., because it\n # looked like the user was exploring single-arm data?) if\n # so, change it back\n if self.metric_changed:\n self.ma_data_table_view.set_metric_in_ui(self.old_metric)\n\n self.ma_data_table_view.model().reset()\n self.ma_data_table_view._enable_analysis_menus_if_appropriate()\n self.ma_data_table_view.emit(SIGNAL(\"dataDirtied()\"))\n\nclass CommandEditMAUnit(QUndoCommand):\n def __init__(self, table_view, study_index, new_ma_unit, old_ma_unit, description=\"MA unit edit\"):\n super(CommandEditMAUnit, self).__init__(description)\n self.model = table_view.model()\n self.old_ma_unit = old_ma_unit\n self.new_ma_unit = new_ma_unit\n self.table_view = table_view\n self.study_index = study_index\n self.ma_data_table_view = table_view\n \n # for debugging\n print(\"CommandEditMAunit created\")\n \n @DebugHelper\n def undo(self):\n self.model.set_current_ma_unit_for_study(self.study_index, self.old_ma_unit)\n self.model.reset()\n self.table_view.resizeColumnsToContents()\n self.ma_data_table_view.emit(SIGNAL(\"dataDirtied()\"))\n\n @DebugHelper\n def redo(self):\n self.model.set_current_ma_unit_for_study(self.study_index, self.new_ma_unit)\n self.model.reset()\n self.model.try_to_update_outcomes()\n \n #self.table_view.model().reset()\n self.table_view.resizeColumnsToContents()\n self.ma_data_table_view.emit(SIGNAL(\"dataDirtied()\"))\n\n# IS THIS CLASS USED ANYWHERE?\nclass CommandEditRawData(QUndoCommand):\n def __init__(self, ma_unit, model, old_raw_data_dict, new_raw_data_dict, description=\"Raw data edit\"):\n super(CommandEditRawData, self).__init__(description)\n self.ma_unit = ma_unit\n # we take the model in as a parameter so we can call reset(), in turn\n # notifying the view to refresh. 
otherwise, the old data is displayed\n # until the user interacts with it in some way\n self.model = model \n self.old_raw_data_dict = old_raw_data_dict\n self.new_raw_data_dict = new_raw_data_dict\n self.group_names = self.old_raw_data_dict.keys()\n \n print(\"Command Edit RawData created\")\n \n @DebugHelper\n def undo(self):\n for group_name in self.group_names:\n raw_data = self.old_raw_data_dict[group_name]\n self.ma_unit.set_raw_data_for_group(group_name, raw_data)\n self.model.reset()\n self.ma_data_table_view.emit(SIGNAL(\"dataDirtied()\"))\n\n @DebugHelper\n def redo(self):\n for group_name in self.group_names:\n raw_data = self.new_raw_data_dict[group_name]\n self.ma_unit.set_raw_data_for_group(group_name, raw_data)\n self.model.reset()\n self.ma_data_table_view.emit(SIGNAL(\"dataDirtied()\"))\n\nclass CommandSort(QUndoCommand):\n def __init__(self, ma_data_table_model, col, reverse_order, description=\"Sort\"):\n super(CommandSort, self).__init__(description)\n self.model = ma_data_table_model\n self.col = col\n self.reverse = reverse_order\n self.previous_order = None\n \n print(\"CommandSort created\")\n\n def redo(self):\n self.previous_order = self.model.get_ordered_study_ids()\n self.model.sort_studies(self.col, self.reverse)\n self.model.reset()\n\n def undo(self):\n self.model.order_studies(self.previous_order)\n self.model.reset()\n \nclass StudyDelegate(QItemDelegate):\n\n def __init__(self, parent=None):\n super(StudyDelegate, self).__init__(parent)\n\n def createEditor(self, parent, *args):\n le = QLineEdit(parent)\n return le\n\n def setEditorData(self, editor, index):\n # used to be Qt.DisplayRole\n text = index.model().data(index, Qt.EditRole)\n editor.setText(text.toString())\n\n" }, { "alpha_fraction": 0.4768069088459015, "alphanum_fraction": 0.5026968717575073, "avg_line_length": 32.33333206176758, "blob_id": "7c20a250c349578f93f349a63beb88432eb38cf5", "content_id": "b5929e03e3d0c4cfbda983d63c3f58867df548d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 927, "license_type": "no_license", "max_line_length": 138, "num_lines": 27, "path": "/src/R/HSROC/R/M_H2_IND.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "M_H2_IND <-\r\nfunction (r, y, a, N, low.beta, up.beta, x) \r\n{\r\n acc_rate = 0\r\n alpha.G = sum(0.5 * (1 - y)) + 1\r\n beta.G = 0.5 * sum((1 - y) * (r + 0.5 * a)^2)\r\n alpha.IG = sum(0.5 * y)\r\n beta.IG = 0.5 * sum(y * (r - 0.5 * a)^2)\r\n can <- truncgamma(n = 1, shape = alpha.G, scale = 1/beta.G, \r\n l = low.beta, u = up.beta)\r\n ratio = dinvgamma(can, shape = alpha.IG, scale = beta.IG)/dinvgamma(x, \r\n shape = alpha.IG, scale = beta.IG)\r\n if (is.na(ratio) == TRUE) {\r\n files.remove()\r\n }\r\n if (is.na(ratio) == TRUE) {\r\n cat(paste(\"Unsuitable initial values were provided. 
\"))\r\n stop(\"Please respecify and call HSROC() again.\\n If you're using 'init=NULL' you need just to run the 'HSROC' function again.\\n\")\r\n }\r\n aprob = min(1, ratio)\r\n u = runif(1)\r\n if (u < aprob) {\r\n x = can\r\n acc_rate = 1\r\n }\r\n return(c(x, acc_rate, aprob))\r\n}\r\n" }, { "alpha_fraction": 0.5705941319465637, "alphanum_fraction": 0.5844459533691406, "avg_line_length": 35.76687240600586, "blob_id": "91ebc2aebc12b8fa7ff522bea98b70ad8f82c7ea", "content_id": "02373db6e9c143c608dd0830edba26a82655b0db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5992, "license_type": "no_license", "max_line_length": 103, "num_lines": 163, "path": "/src/old_testing.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "#############################################################################\n# Unit tests! Use nose\n# [http://somethingaboutorange.com/mrl/projects/nose] or just\n# > easy_install nose\n#\n# e.g., while in this directory:\n# > nosetests meta_form\n#\n##############################################################################\n\n\n\nimport sys\nimport os\nfrom PyQt4 import QtCore, QtGui\nfrom PyQt4.Qt import *\n\n\nimport ma_data_table_model\nimport meta_globals\nimport ma_dataset\nfrom meta_form import MetaForm\n\n# ####################################################\n# # this (toy-data) is almost certainly antiquated #\n# # and should be removed or updated #\n# ####################################################\n# if len(sys.argv)>1 and sys.argv[-1]==\"--toy-data\":\n# # toy data for now\n# data_model = _gen_some_data()\n# self.model = ma_data_table_model.DatasetModel(dataset=data_model)\n# self.display_outcome(\"death\")\n# self.model.set_current_time_point(0)\n# self.model.current_effect = \"OR\"\n# self.model.try_to_update_outcomes()\n# self.model.reset()\n# self.tableView.resizeColumnsToContents()\n# else:\n\ndef _gen_some_data():\n ''' For testing purposes. 
Generates a toy dataset.'''\n dataset = ma_dataset.Dataset()\n studies = [ma_dataset.Study(i, name=study, year=y) for i, study, y in zip(range(3),\n [\"trik\", \"wallace\", \"lau\"], [1984, 1990, 2000])]\n raw_data = [\n [ [10, 100] , [15, 100] ], [ [20, 200] , [25, 200] ],\n [ [30, 300] , [35, 300] ]\n ]\n \n outcome = ma_dataset.Outcome(\"death\", meta_globals.BINARY)\n dataset.add_outcome(outcome)\n\n for study in studies:\n dataset.add_study(study)\n \n for study,data in zip(dataset.studies, raw_data):\n study.add_ma_unit(ma_dataset.MetaAnalyticUnit(outcome, raw_data=data), \"baseline\")\n \n return dataset\n\n\ndef _setup_app():\n app = QtGui.QApplication(sys.argv)\n meta = MetaForm()\n meta.tableView.setSelectionMode(QTableView.ContiguousSelection)\n meta.show()\n return (meta, app)\n\ndef _tear_down_app(app):\n sys.exit(app.exec_())\n\n\ndef binary_meta_analysis_test():\n meta, app = _setup_app()\n meta.open(os.path.join(\"test_data\", \"amino.oma\"))\n\n ####\n # TODO -- run through all metrics here\n\ndef copy_paste_test():\n meta, app = _setup_app()\n\n # generate some faux data, set up the\n # tableview model\n #data_model = _gen_some_data()\n meta.open(os.path.join(\"test_data\", \"amino.oma\"))\n\n #test_model = DatasetModel(dataset=data_model)\n #meta.tableView.setModel(test_model)\n\n upper_left_index = meta.tableView.model().createIndex(0, 1)\n lower_right_index = meta.tableView.model().createIndex(1, 2)\n copied = meta.tableView.copy_contents_in_range(upper_left_index, lower_right_index,\n to_clipboard=False)\n\n tester = \"trik\\t1984\\nwallace\\t1990\"\n\n assert(copied == tester)\n print \"ok.. copied correctly\"\n \n # now ascertain that we can paste it. first, copy (the same string) to the clipboard\n copied = meta.tableView.copy_contents_in_range(upper_left_index, lower_right_index,\n to_clipboard=True)\n upper_left_index = meta.tableView.model().createIndex(1, 1)\n\n # originally, the second row is wallace\n assert(str(meta.tableView.model().data(upper_left_index).toString()) == \"wallace\")\n meta.tableView.paste_from_clipboard(upper_left_index)\n # now, the 2nd row (index:1) should contain trik\n assert(str(meta.tableView.model().data(upper_left_index).toString()) == \"trik\")\n\n\ndef test_add_new_outcome():\n meta, app = _setup_app()\n data_model = _gen_some_data()\n test_model = ma_data_table_model.DatasetModel(dataset=data_model)\n meta.tableView.setModel(test_model)\n new_outcome_name = u\"test outcome\"\n new_outcome_type = \"binary\"\n meta._add_new_outcome(new_outcome_name, new_outcome_type)\n outcome_names = meta.model.dataset.get_outcome_names()\n assert(new_outcome_name in outcome_names)\n \ndef test_remove_outcome():\n meta, app = _setup_app()\n data_model = _gen_some_data()\n test_model = ma_data_table_model.DatasetModel(dataset=data_model)\n meta.tableView.setModel(test_model)\n new_outcome_name = u\"test outcome\"\n new_outcome_type = \"binary\"\n meta._add_new_outcome(new_outcome_name, new_outcome_type)\n # note that we test the adding functionality elsewhere\n meta.model.dataset.remove_outcome(new_outcome_name)\n outcome_names = meta.model.dataset.get_outcome_names()\n assert (new_outcome_name not in outcome_names)\n \ndef paste_from_excel_test():\n meta, app = _setup_app()\n\n #set up the tableview model with a blank model\n test_model = ma_data_table_model.DatasetModel()\n meta.tableView.setModel(test_model)\n upper_left_index = meta.tableView.model().createIndex(0, 1)\n # copied from an Excel spreadsheet\n copied_str = \"\"\"a 1993\nb 
1785\n\"\"\"\n clipboard = QApplication.clipboard()\n clipboard.setText(QString(copied_str))\n meta.tableView.paste_from_clipboard(upper_left_index)\n\n #\n # now make sure the content is there\n content = [[\"a\", \"1993\"], [\"b\", \"1785\"]]\n for row in range(len(content)):\n for col in range(len(content[row])):\n # the plus one offsets the first column, which is the include/\n # exclude checkbox\n cur_index = meta.tableView.model().createIndex(row, col+1)\n cur_val = str(meta.tableView.model().data(cur_index).toString())\n should_be = content[row][col]\n print \"cur val is %s; it should be %s\" % (cur_val, should_be)\n assert(cur_val == should_be)" }, { "alpha_fraction": 0.5246015191078186, "alphanum_fraction": 0.524948000907898, "avg_line_length": 41.98507308959961, "blob_id": "fd6cf579a92dcc9c73f048ce2eaa2aa6685e1c69", "content_id": "8a0bc7eac522f37d9f9519f726b3b3c6fff64a0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5772, "license_type": "no_license", "max_line_length": 190, "num_lines": 134, "path": "/src/meta_reg_form.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "from PyQt4.Qt import *\n\nimport forms.ui_meta_reg\nimport meta_py_r\n\nclass MetaRegForm(QDialog, forms.ui_meta_reg.Ui_cov_reg_dialog):\n \n def __init__(self, model, parent=None):\n super(MetaRegForm, self).__init__(parent)\n self.model = model\n self.setupUi(self)\n self.covs_and_check_boxes = None\n self._populate_chk_boxes()\n\n # as usual, diagnostic data is special\n self.is_diagnostic = self.model.get_current_outcome_type() == \"diagnostic\"\n \n if not self.is_diagnostic:\n self.diagnostic_group_box.hide()\n\n QObject.connect(self.buttonBox, SIGNAL(\"rejected()\"), self.cancel)\n QObject.connect(self.buttonBox, SIGNAL(\"accepted()\"), self.run_meta_reg)\n \n def cancel(self):\n print \"(cancel)\"\n self.reject()\n \n def run_meta_reg(self):\n at_least_one_study_does_not_have_vals = False\n cov_d = {}\n selected_covariates = []\n for cov, chk_box in self.covs_and_check_boxes:\n if chk_box.isChecked():\n selected_covariates.append(cov)\n\n # here we have to exclude studies that do not have values\n # for all of the selected covariates\n cov_d[cov.name] = \\\n self.model.dataset.get_values_for_cov(cov.name, ids_for_keys=True)\n\n current_effect = self.model.current_effect\n if self.is_diagnostic:\n if self.dor_radio.isChecked():\n current_effect = \"DOR\"\n elif self.sensitivity_radio.isChecked():\n current_effect = \"Sens\"\n else:\n current_effect = \"Spec\"\n\n studies = []\n for study in [study for study in self.model.get_studies(only_if_included=True)]:\n has_covs = [study.id in cov_d[selected_cov.name] for selected_cov in selected_covariates]\n #if study != '' and study.id in cov_d and cov_d[study.id] is not None:\n # studies.append(study)\n if all(has_covs):\n studies.append(study)\n else:\n at_least_one_study_does_not_have_vals = True\n\n if self.is_diagnostic:\n meta_py_r.ma_dataset_to_simple_diagnostic_robj(self.model,\\\n metric=current_effect,\n covs_to_include=selected_covariates,\n studies=studies) \n elif self.model.get_current_outcome_type() == \"continuous\":\n meta_py_r.ma_dataset_to_simple_continuous_robj(self.model,\\\n covs_to_include=selected_covariates,\n studies=studies) \n else:\n meta_py_r.ma_dataset_to_simple_binary_robj(self.model, include_raw_data=False,\\\n covs_to_include=selected_covariates,\n studies=studies)\n\n \n # fixed or random effects meta-regression?\n fixed_effects = False\n 
if self.fixed_effects_radio.isChecked():\n fixed_effects = True\n \n\n if at_least_one_study_does_not_have_vals:\n # TODO: this run_with_missing stuff needs to be finished i.e.\n # actually remove the affected studies. Currently just throws an\n # error\n run_with_missing = QMessageBox.warning(self,\n \"Missing covariate value(s)\",\n \"Some studies do not have values for the covariate(s) you have selected. Do you want me to run the regression without them (i.e., drop studies with missing values)?\",\n QMessageBox.Yes | QMessageBox.No)\n \n if run_with_missing == QMessageBox.No:\n self.accept()\n return\n\n\n result = meta_py_r.run_meta_regression(self.model.dataset, studies,\n selected_covariates,\n current_effect,\n fixed_effects=fixed_effects,\n conf_level=self.model.get_global_conf_level())\n if isinstance(result, str):\n # then there was an error!\n QMessageBox.critical(self,\n \"Whoops.\",\n \"Sorry, there was an error performing the regression.\\n%s\" % \\\n result)\n else:\n self.parent().analysis(result)\n self.accept()\n \n def _populate_combo_box(self):\n studies = self.model.get_studies(only_if_included=True)\n \n for cov in self.model.dataset.covariates:\n cov_vals = [study.covariate_dict[cov.name] for study in studies]\n if not None in cov_vals:\n self.cov_cbo_box.addItem(cov.name)\n\n def _populate_chk_boxes(self):\n self.covs_and_check_boxes = []\n studies = self.model.get_studies(only_if_included=True)\n \n chk_box_layout = QGridLayout()\n for cov in self.model.dataset.covariates:\n cov_vals = [study.covariate_dict[cov.name] for study in studies]\n # note that we're *allowing* empty strings\n chk_box = QCheckBox(cov.name)\n if len(self.covs_and_check_boxes)==0:\n # check the first covariate by default\n # (this is arbitrary)\n chk_box.setChecked(True)\n chk_box_layout.addWidget(chk_box)\n self.covs_and_check_boxes.append((cov, chk_box))\n \n self.cov_grp_box.setLayout(chk_box_layout)\n " }, { "alpha_fraction": 0.51722651720047, "alphanum_fraction": 0.5232558250427246, "avg_line_length": 28.552631378173828, "blob_id": "a4883a04ccb730dff14178153e9e08a068fcad7e", "content_id": "6c0309de18d70f2a16ed486f7a34854a55d4c7e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 2322, "license_type": "no_license", "max_line_length": 59, "num_lines": 76, "path": "/src/R/HSROC/R/files.remove.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "files.remove <-\r\nfunction (...) 
\r\n{\r\n if (file.exists(\"alpha.txt\") == TRUE) {\r\n file.remove(\"alpha.txt\")\r\n }\r\n if (file.exists(\"beta.txt\") == TRUE) {\r\n file.remove(\"beta.txt\")\r\n }\r\n if (file.exists(\"C_overall.txt\") == TRUE) {\r\n file.remove(\"C_overall.txt\")\r\n }\r\n if (file.exists(\"capital.THETA.txt\") == TRUE) {\r\n file.remove(\"capital.THETA.txt\")\r\n }\r\n if (file.exists(\"Initial values.txt\") == TRUE) {\r\n file.remove(\"Initial values.txt\")\r\n }\r\n if (file.exists(\"LAMBDA.txt\") == TRUE) {\r\n file.remove(\"LAMBDA.txt\")\r\n }\r\n if (file.exists(\"model.txt\") == TRUE) {\r\n file.remove(\"model.txt\")\r\n }\r\n if (file.exists(\"PI.txt\") == TRUE) {\r\n file.remove(\"PI.txt\")\r\n }\r\n if (file.exists(\"Prior.information.txt\") == TRUE) {\r\n file.remove(\"Prior.information.txt\")\r\n }\r\n if (file.exists(\"S_overall.txt\") == TRUE) {\r\n file.remove(\"S_overall.txt\")\r\n }\r\n if (file.exists(\"Sens1.txt\") == TRUE) {\r\n file.remove(\"Sens1.txt\")\r\n }\r\n if (file.exists(\"Sens2.txt\") == TRUE) {\r\n file.remove(\"Sens2.txt\")\r\n }\r\n if (file.exists(\"sigma.alpha.txt\") == TRUE) {\r\n file.remove(\"sigma.alpha.txt\")\r\n }\r\n if (file.exists(\"sigma.theta.txt\") == TRUE) {\r\n file.remove(\"sigma.theta.txt\")\r\n }\r\n if (file.exists(\"Spec1.txt\") == TRUE) {\r\n file.remove(\"Spec1.txt\")\r\n }\r\n if (file.exists(\"Spec2.txt\") == TRUE) {\r\n file.remove(\"Spec2.txt\")\r\n }\r\n if (file.exists(\"theta.txt\") == TRUE) {\r\n file.remove(\"theta.txt\")\r\n }\r\n if (file.exists(\"C2.txt\") == TRUE) {\r\n file.remove(\"C2.txt\")\r\n }\r\n if (file.exists(\"Random Initial values.txt\") == TRUE) {\r\n file.remove(\"Random Initial values.txt\")\r\n }\r\n if (file.exists(\"S2.txt\") == TRUE) {\r\n file.remove(\"S2.txt\")\r\n }\r\n if (file.exists(\"REstart_values_index.txt\") == TRUE) {\r\n file.remove(\"REstart_values_index.txt\")\r\n }\r\n if (file.exists(\"REstarting REFSTD.txt\") == TRUE) {\r\n file.remove(\"REstarting REFSTD.txt\")\r\n }\r\n if (file.exists(\"REstarting values.txt\") == TRUE) {\r\n file.remove(\"REstarting values.txt\")\r\n }\r\n if (file.exists(\"REstarting values 2.txt\") == TRUE) {\r\n file.remove(\"REstarting values 2.txt\")\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.6366323232650757, "alphanum_fraction": 0.6671459674835205, "avg_line_length": 55.64706039428711, "blob_id": "905fb0804f5616a4b2875a8a91ff56fe5044cf32", "content_id": "32a9e227c18baa828504cf6d4bca1b6dc197aa81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12519, "license_type": "no_license", "max_line_length": 115, "num_lines": 221, "path": "/src/forms/ui_ma_specs.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'ma_specs2.ui'\n#\n# Created: Wed Apr 17 14:37:20 2013\n# by: PyQt4 UI code generator 4.10\n#\n# WARNING! 
All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_Dialog(object):\n def setupUi(self, Dialog):\n Dialog.setObjectName(_fromUtf8(\"Dialog\"))\n Dialog.setWindowModality(QtCore.Qt.ApplicationModal)\n Dialog.resize(506, 485)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n Dialog.setFont(font)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(_fromUtf8(\":/images/meta.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n Dialog.setWindowIcon(icon)\n self.verticalLayout = QtGui.QVBoxLayout(Dialog)\n self.verticalLayout.setObjectName(_fromUtf8(\"verticalLayout\"))\n self.specs_tab = QtGui.QTabWidget(Dialog)\n self.specs_tab.setObjectName(_fromUtf8(\"specs_tab\"))\n self.methods_tab = QtGui.QWidget()\n self.methods_tab.setObjectName(_fromUtf8(\"methods_tab\"))\n self.horizontalLayout = QtGui.QHBoxLayout(self.methods_tab)\n self.horizontalLayout.setObjectName(_fromUtf8(\"horizontalLayout\"))\n self.gridLayout = QtGui.QGridLayout()\n self.gridLayout.setObjectName(_fromUtf8(\"gridLayout\"))\n self.method_lbl = QtGui.QLabel(self.methods_tab)\n self.method_lbl.setMinimumSize(QtCore.QSize(200, 0))\n self.method_lbl.setMaximumSize(QtCore.QSize(200, 16777215))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.method_lbl.setFont(font)\n self.method_lbl.setAlignment(QtCore.Qt.AlignCenter)\n self.method_lbl.setObjectName(_fromUtf8(\"method_lbl\"))\n self.gridLayout.addWidget(self.method_lbl, 0, 0, 1, 1)\n self.method_cbo_box = QtGui.QComboBox(self.methods_tab)\n self.method_cbo_box.setObjectName(_fromUtf8(\"method_cbo_box\"))\n self.gridLayout.addWidget(self.method_cbo_box, 0, 1, 1, 1)\n self.parameter_grp_box = QtGui.QGroupBox(self.methods_tab)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(40)\n sizePolicy.setHeightForWidth(self.parameter_grp_box.sizePolicy().hasHeightForWidth())\n self.parameter_grp_box.setSizePolicy(sizePolicy)\n self.parameter_grp_box.setMinimumSize(QtCore.QSize(0, 200))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.parameter_grp_box.setFont(font)\n self.parameter_grp_box.setTitle(_fromUtf8(\"\"))\n self.parameter_grp_box.setObjectName(_fromUtf8(\"parameter_grp_box\"))\n self.gridLayout.addWidget(self.parameter_grp_box, 1, 0, 1, 2)\n self.horizontalLayout.addLayout(self.gridLayout)\n self.specs_tab.addTab(self.methods_tab, _fromUtf8(\"\"))\n self.plot_tab = QtGui.QWidget()\n self.plot_tab.setObjectName(_fromUtf8(\"plot_tab\"))\n self.verticalLayout_2 = QtGui.QVBoxLayout(self.plot_tab)\n self.verticalLayout_2.setObjectName(_fromUtf8(\"verticalLayout_2\"))\n self.groupBox = QtGui.QGroupBox(self.plot_tab)\n self.groupBox.setObjectName(_fromUtf8(\"groupBox\"))\n self.horizontalLayout_2 = QtGui.QHBoxLayout(self.groupBox)\n self.horizontalLayout_2.setObjectName(_fromUtf8(\"horizontalLayout_2\"))\n self.gridLayout_2 = QtGui.QGridLayout()\n self.gridLayout_2.setObjectName(_fromUtf8(\"gridLayout_2\"))\n self.label_2 = QtGui.QLabel(self.groupBox)\n 
self.label_2.setObjectName(_fromUtf8(\"label_2\"))\n self.gridLayout_2.addWidget(self.label_2, 0, 0, 1, 1)\n self.col1_str_edit = QtGui.QLineEdit(self.groupBox)\n self.col1_str_edit.setObjectName(_fromUtf8(\"col1_str_edit\"))\n self.gridLayout_2.addWidget(self.col1_str_edit, 0, 1, 1, 1)\n self.show_1 = QtGui.QCheckBox(self.groupBox)\n self.show_1.setChecked(True)\n self.show_1.setObjectName(_fromUtf8(\"show_1\"))\n self.gridLayout_2.addWidget(self.show_1, 0, 2, 1, 1)\n self.label_4 = QtGui.QLabel(self.groupBox)\n self.label_4.setObjectName(_fromUtf8(\"label_4\"))\n self.gridLayout_2.addWidget(self.label_4, 1, 0, 1, 1)\n self.col2_str_edit = QtGui.QLineEdit(self.groupBox)\n self.col2_str_edit.setObjectName(_fromUtf8(\"col2_str_edit\"))\n self.gridLayout_2.addWidget(self.col2_str_edit, 1, 1, 1, 1)\n self.show_2 = QtGui.QCheckBox(self.groupBox)\n self.show_2.setChecked(True)\n self.show_2.setObjectName(_fromUtf8(\"show_2\"))\n self.gridLayout_2.addWidget(self.show_2, 1, 2, 1, 1)\n self.label_5 = QtGui.QLabel(self.groupBox)\n self.label_5.setObjectName(_fromUtf8(\"label_5\"))\n self.gridLayout_2.addWidget(self.label_5, 2, 0, 1, 1)\n self.col3_str_edit = QtGui.QLineEdit(self.groupBox)\n self.col3_str_edit.setObjectName(_fromUtf8(\"col3_str_edit\"))\n self.gridLayout_2.addWidget(self.col3_str_edit, 2, 1, 1, 1)\n self.show_3 = QtGui.QCheckBox(self.groupBox)\n self.show_3.setChecked(True)\n self.show_3.setObjectName(_fromUtf8(\"show_3\"))\n self.gridLayout_2.addWidget(self.show_3, 2, 2, 1, 1)\n self.label_6 = QtGui.QLabel(self.groupBox)\n self.label_6.setObjectName(_fromUtf8(\"label_6\"))\n self.gridLayout_2.addWidget(self.label_6, 3, 0, 1, 1)\n self.col4_str_edit = QtGui.QLineEdit(self.groupBox)\n self.col4_str_edit.setObjectName(_fromUtf8(\"col4_str_edit\"))\n self.gridLayout_2.addWidget(self.col4_str_edit, 3, 1, 1, 1)\n self.show_4 = QtGui.QCheckBox(self.groupBox)\n self.show_4.setChecked(True)\n self.show_4.setObjectName(_fromUtf8(\"show_4\"))\n self.gridLayout_2.addWidget(self.show_4, 3, 2, 1, 1)\n self.horizontalLayout_2.addLayout(self.gridLayout_2)\n self.verticalLayout_2.addWidget(self.groupBox)\n self.gridLayout_3 = QtGui.QGridLayout()\n self.gridLayout_3.setObjectName(_fromUtf8(\"gridLayout_3\"))\n self.label_7 = QtGui.QLabel(self.plot_tab)\n self.label_7.setMaximumSize(QtCore.QSize(16777215, 50))\n self.label_7.setObjectName(_fromUtf8(\"label_7\"))\n self.gridLayout_3.addWidget(self.label_7, 0, 0, 1, 1)\n self.x_lbl_le = QtGui.QLineEdit(self.plot_tab)\n self.x_lbl_le.setObjectName(_fromUtf8(\"x_lbl_le\"))\n self.gridLayout_3.addWidget(self.x_lbl_le, 0, 2, 1, 1)\n self.label_8 = QtGui.QLabel(self.plot_tab)\n self.label_8.setObjectName(_fromUtf8(\"label_8\"))\n self.gridLayout_3.addWidget(self.label_8, 4, 0, 1, 1)\n self.x_ticks_le = QtGui.QLineEdit(self.plot_tab)\n self.x_ticks_le.setObjectName(_fromUtf8(\"x_ticks_le\"))\n self.gridLayout_3.addWidget(self.x_ticks_le, 4, 2, 1, 1)\n self.label_9 = QtGui.QLabel(self.plot_tab)\n self.label_9.setObjectName(_fromUtf8(\"label_9\"))\n self.gridLayout_3.addWidget(self.label_9, 1, 0, 1, 1)\n self.plot_lb_le = QtGui.QLineEdit(self.plot_tab)\n self.plot_lb_le.setObjectName(_fromUtf8(\"plot_lb_le\"))\n self.gridLayout_3.addWidget(self.plot_lb_le, 1, 2, 1, 1)\n self.label_10 = QtGui.QLabel(self.plot_tab)\n self.label_10.setObjectName(_fromUtf8(\"label_10\"))\n self.gridLayout_3.addWidget(self.label_10, 2, 0, 1, 1)\n self.plot_ub_le = QtGui.QLineEdit(self.plot_tab)\n self.plot_ub_le.setObjectName(_fromUtf8(\"plot_ub_le\"))\n 
self.gridLayout_3.addWidget(self.plot_ub_le, 2, 2, 1, 1)\n self.label_11 = QtGui.QLabel(self.plot_tab)\n self.label_11.setObjectName(_fromUtf8(\"label_11\"))\n self.gridLayout_3.addWidget(self.label_11, 5, 0, 1, 1)\n self.show_summary_line = QtGui.QCheckBox(self.plot_tab)\n self.show_summary_line.setText(_fromUtf8(\"\"))\n self.show_summary_line.setChecked(True)\n self.show_summary_line.setObjectName(_fromUtf8(\"show_summary_line\"))\n self.gridLayout_3.addWidget(self.show_summary_line, 5, 2, 1, 1)\n self.verticalLayout_2.addLayout(self.gridLayout_3)\n self.horizontalLayout_4 = QtGui.QHBoxLayout()\n self.horizontalLayout_4.setObjectName(_fromUtf8(\"horizontalLayout_4\"))\n self.label_3 = QtGui.QLabel(self.plot_tab)\n self.label_3.setObjectName(_fromUtf8(\"label_3\"))\n self.horizontalLayout_4.addWidget(self.label_3)\n self.image_path = QtGui.QLineEdit(self.plot_tab)\n self.image_path.setObjectName(_fromUtf8(\"image_path\"))\n self.horizontalLayout_4.addWidget(self.image_path)\n self.save_btn = QtGui.QPushButton(self.plot_tab)\n self.save_btn.setMaximumSize(QtCore.QSize(25, 16777215))\n self.save_btn.setObjectName(_fromUtf8(\"save_btn\"))\n self.horizontalLayout_4.addWidget(self.save_btn)\n self.verticalLayout_2.addLayout(self.horizontalLayout_4)\n self.specs_tab.addTab(self.plot_tab, _fromUtf8(\"\"))\n self.verticalLayout.addWidget(self.specs_tab)\n self.buttonBox = QtGui.QDialogButtonBox(Dialog)\n self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)\n self.buttonBox.setObjectName(_fromUtf8(\"buttonBox\"))\n self.verticalLayout.addWidget(self.buttonBox)\n\n self.retranslateUi(Dialog)\n self.specs_tab.setCurrentIndex(0)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n\n def retranslateUi(self, Dialog):\n Dialog.setWindowTitle(_translate(\"Dialog\", \"Method & Parameters\", None))\n self.method_lbl.setText(_translate(\"Dialog\", \"analysis method:\", None))\n self.specs_tab.setTabText(self.specs_tab.indexOf(self.methods_tab), _translate(\"Dialog\", \"method\", None))\n self.groupBox.setTitle(_translate(\"Dialog\", \"column labels\", None))\n self.label_2.setToolTip(_translate(\"Dialog\", \"Text for column title that appears on forest plot\", None))\n self.label_2.setText(_translate(\"Dialog\", \"col 1 label:\", None))\n self.col1_str_edit.setText(_translate(\"Dialog\", \"Studies\", None))\n self.show_1.setText(_translate(\"Dialog\", \"show\", None))\n self.label_4.setToolTip(_translate(\"Dialog\", \"Text for column title that appears on forest plot\", None))\n self.label_4.setText(_translate(\"Dialog\", \"col 2 label:\", None))\n self.col2_str_edit.setText(_translate(\"Dialog\", \"[default]\", None))\n self.show_2.setText(_translate(\"Dialog\", \"show\", None))\n self.label_5.setToolTip(_translate(\"Dialog\", \"Text for column title that appears on forest plot\", None))\n self.label_5.setText(_translate(\"Dialog\", \"col 3 label:\", None))\n self.col3_str_edit.setText(_translate(\"Dialog\", \"Ev/Trt\", None))\n self.show_3.setText(_translate(\"Dialog\", \"show\", None))\n self.label_6.setToolTip(_translate(\"Dialog\", \"Text for column title that appears on forest plot\", None))\n self.label_6.setText(_translate(\"Dialog\", \"col 4 label:\", None))\n self.col4_str_edit.setText(_translate(\"Dialog\", \"Ev/Ctrl\", None))\n self.show_4.setText(_translate(\"Dialog\", \"show\", None))\n self.label_7.setText(_translate(\"Dialog\", \"x label:\", None))\n self.x_lbl_le.setText(_translate(\"Dialog\", \"[default]\", None))\n 
self.label_8.setText(_translate(\"Dialog\", \"x ticks:\", None))\n self.x_ticks_le.setText(_translate(\"Dialog\", \"[default]\", None))\n self.label_9.setText(_translate(\"Dialog\", \"x-axis lower bound\", None))\n self.plot_lb_le.setText(_translate(\"Dialog\", \"[default]\", None))\n self.label_10.setText(_translate(\"Dialog\", \"x-axis upper bound\", None))\n self.plot_ub_le.setText(_translate(\"Dialog\", \"[default]\", None))\n self.label_11.setText(_translate(\"Dialog\", \"show summary line:\", None))\n self.label_3.setText(_translate(\"Dialog\", \"save image to:\", None))\n self.image_path.setText(_translate(\"Dialog\", \"./r_tmp/forest.png\", None))\n self.save_btn.setText(_translate(\"Dialog\", \"...\", None))\n self.specs_tab.setTabText(self.specs_tab.indexOf(self.plot_tab), _translate(\"Dialog\", \"forest plot\", None))\n\nimport icons_rc\n" }, { "alpha_fraction": 0.5933202505111694, "alphanum_fraction": 0.6031433939933777, "avg_line_length": 24.5, "blob_id": "ddff936fc2dc08dd707b571ca357ef3087c829d3", "content_id": "7fd4045124a4e3998f18362622c1dce9c18d1d8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 509, "license_type": "no_license", "max_line_length": 64, "num_lines": 20, "path": "/src/gsetup_win.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "##########################\n# Author: George Dietz #\n# CEBM @ Brown #\n#\n# DESCRIPTION: Script for py2exe for building windows executable\n# Run with > python gsetup.py py2exe --includes sip\n#\n##########################\n\n# setup.py to build windows executable\n\n\n#from py2exe.build_exe import py2exe\nfrom distutils.core import setup\nimport py2exe\n\n##path_to_launch_file = os.path.join(\"src\",\"launch.py\")\n\n#setup(windows=[{\"script\": 'launch.py'}])\nsetup(windows=[{\"script\": 'win_prelaunch.py'}])" }, { "alpha_fraction": 0.5532544255256653, "alphanum_fraction": 0.5562130212783813, "avg_line_length": 23.214284896850586, "blob_id": "3caa8cbbd1f98afa82d2705cfb7458aa63b6337a", "content_id": "02c43951dc54101cbf2437f8765414fc252e8d25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 338, "license_type": "no_license", "max_line_length": 57, "num_lines": 14, "path": "/src/progress_bar.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "''''''''''''''''''''''''''''''\n' admittedly, kind of silly. 
'\n''''''''''''''''''''''''''''''\n\nfrom PyQt4.Qt import QDialog\n#import pdb\nimport forms.ui_running\n\n\nclass MetaProgress(QDialog, forms.ui_running.Ui_running):\n \n def __init__(self, parent=None):\n super(MetaProgress, self).__init__(parent)\n self.setupUi(self)" }, { "alpha_fraction": 0.6495835781097412, "alphanum_fraction": 0.6623958945274353, "avg_line_length": 34.5, "blob_id": "2c62009ce361e0cc0a0469204ce7324671b545dd", "content_id": "5dcda2cae7b51b2021dcbe1b81d88839032bd3b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1561, "license_type": "no_license", "max_line_length": 99, "num_lines": 44, "path": "/src/conf_level_dialog.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "'''\nCreated on Apr 29, 2013\n\n@author: George Dietz\n'''\n\n#from PyQt4.QtCore import *\n#from PyQt4.QtGui import *\nfrom PyQt4.QtCore import QString, SIGNAL, SLOT\nfrom PyQt4.QtGui import QDialog, QDialogButtonBox, QDoubleSpinBox, QHBoxLayout, QLabel, QVBoxLayout\n\nfrom meta_globals import DEFAULT_CONF_LEVEL\n\nclass ChangeConfLevelDlg(QDialog):\n ''' Dialog for changing confidence level '''\n \n def __init__(self, previous_value=DEFAULT_CONF_LEVEL, parent=None):\n super(ChangeConfLevelDlg, self).__init__(parent)\n \n cl_label = QLabel(\"Global Confidence Level:\")\n \n self.conf_level_spinbox = QDoubleSpinBox()\n self.conf_level_spinbox.setRange(50, 99.999 )\n self.conf_level_spinbox.setSingleStep(0.1)\n self.conf_level_spinbox.setSuffix(QString(\"%\"))\n self.conf_level_spinbox.setValue(previous_value)\n self.conf_level_spinbox.setDecimals(1)\n \n buttonBox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)\n \n hlayout = QHBoxLayout()\n hlayout.addWidget(cl_label)\n hlayout.addWidget(self.conf_level_spinbox)\n vlayout = QVBoxLayout()\n vlayout.addLayout(hlayout)\n vlayout.addWidget(buttonBox)\n self.setLayout(vlayout)\n \n self.connect(buttonBox, SIGNAL(\"accepted()\"), self, SLOT(\"accept()\"))\n self.connect(buttonBox, SIGNAL(\"rejected()\"), self, SLOT(\"reject()\"))\n self.setWindowTitle(\"Change Confidence Level\")\n \n def get_value(self):\n return self.conf_level_spinbox.value()" }, { "alpha_fraction": 0.3632861077785492, "alphanum_fraction": 0.3847513198852539, "avg_line_length": 47.527976989746094, "blob_id": "09ba3d1a9e6ab1dd8b4dba33dfda6cfb94add464", "content_id": "d35f09c3a9039242dd394f7f19a78ca3796c5df7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 34521, "license_type": "no_license", "max_line_length": 91, "num_lines": 697, "path": "/src/R/HSROC/R/Initialization.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "Initialization <-\r\nfunction (first.run, random, param, cond.Ind, rs, GS_se, GS_sp, \r\n Data1, Data2, Data3, Data4, Data5, Data6, path, studies, \r\n sco = FALSE, psa, pst) \r\n{\r\n setwd(path)\r\n x = rs[[1]]\r\n model = read.table(\"model.txt\")\r\n a.beta = param[1]\r\n b.beta = param[2]\r\n a.THETA = param[3]\r\n b.THETA = param[4]\r\n a.LAMBDA = param[5]\r\n b.LAMBDA = param[6]\r\n a.disp.alpha = param[7]\r\n b.disp.alpha = param[8]\r\n a.disp.theta = param[9]\r\n b.disp.theta = param[10]\r\n a.pi = param[11]\r\n b.pi = param[12]\r\n if (length(param) == 12) {\r\n GS = TRUE\r\n }\r\n else {\r\n GS = FALSE\r\n }\r\n if (cond.Ind == \"TRUE\" & model == 1) {\r\n if (first.run == TRUE) {\r\n if (random == TRUE) {\r\n file.ini = \"Random Initial values.txt\"\r\n if (psa == \"sd\") 
{\r\n init.sigma.alpha = runif(1, 1/sqrt(b.disp.alpha), \r\n 1/sqrt(a.disp.alpha))\r\n prec.alpha = 1/(init.sigma.alpha)^2\r\n }\r\n else {\r\n if (psa == \"v\") {\r\n init.sigma.alpha = runif(1, 1/b.disp.alpha, \r\n 1/a.disp.alpha)\r\n prec.alpha = 1/(init.sigma.alpha)^2\r\n }\r\n else {\r\n if (psa == \"p\") {\r\n init.sigma.alpha = rgamma(1, shape = a.disp.alpha, \r\n scale = b.disp.alpha)\r\n prec.alpha = 1/(init.sigma.alpha)^2\r\n }\r\n }\r\n }\r\n if (pst == \"sd\") {\r\n init.sigma.theta = runif(1, 1/sqrt(b.disp.theta), \r\n 1/sqrt(a.disp.theta))\r\n prec.theta = 1/(init.sigma.theta)^2\r\n }\r\n else {\r\n if (pst == \"v\") {\r\n init.sigma.theta = runif(1, 1/b.disp.theta, \r\n 1/a.disp.theta)\r\n prec.theta = 1/(init.sigma.theta)^2\r\n }\r\n else {\r\n if (pst == \"p\") {\r\n init.sigma.theta = rgamma(1, shape = a.disp.theta, \r\n scale = b.disp.theta)\r\n prec.theta = 1/(init.sigma.theta)^2\r\n }\r\n }\r\n }\r\n init.THETA = runif(1, min = a.THETA, max = b.THETA)\r\n init.LAMBDA = runif(1, min = a.LAMBDA, max = b.LAMBDA)\r\n init.beta = runif(1, min = a.beta, max = b.beta)\r\n init.theta = rnorm(n = studies, mean = init.THETA, \r\n sd = init.sigma.theta)\r\n init.alpha = rnorm(n = studies, mean = init.LAMBDA, \r\n sd = init.sigma.alpha)\r\n init.S1 = rbeta(studies, shape1 = 1, shape2 = 1)\r\n init.C1 = rbeta(studies, shape1 = 1, shape2 = 1)\r\n init.PI = rbeta(studies, shape1 = 1, shape2 = 1)\r\n if (GS == TRUE) {\r\n study.ref_std = \"Perfect Reference standard\"\r\n }\r\n else {\r\n if (GS == FALSE) {\r\n if (GS_se == TRUE & GS_sp == FALSE) {\r\n a.C2 = param[13:(13 + (x - 1))]\r\n b.C2 = param[(13 + (x)):(13 + (2 * x - \r\n 1))]\r\n init.S2 = 1\r\n init.C2 = rbeta(x, shape1 = a.C2, shape2 = b.C2)\r\n study.ref_std = rbind(init.S2, init.C2)\r\n rownames(study.ref_std) = c(\"Perfect Sensitivity\", \r\n \"init.C2\")\r\n }\r\n else {\r\n if (GS_se == FALSE & GS_sp == TRUE) {\r\n a.S2 = param[13:(13 + (x - 1))]\r\n b.S2 = param[(13 + (x)):(13 + (2 * x - \r\n 1))]\r\n init.S2 = rbeta(x, shape1 = a.S2, shape2 = b.S2)\r\n init.C2 = 1\r\n study.ref_std = rbind(init.S2, init.C2)\r\n rownames(study.ref_std) = c(\"init.S2\", \r\n \"Perfect Specificity\")\r\n }\r\n else {\r\n if (GS_se == FALSE & GS_sp == FALSE) {\r\n a.S2 = param[13:(13 + (x - 1))]\r\n b.S2 = param[(13 + (x)):(13 + (2 * \r\n x - 1))]\r\n a.C2 = param[(13 + (2 * x)):(13 + (3 * \r\n x - 1))]\r\n b.C2 = param[(13 + (3 * x)):(13 + (4 * \r\n x - 1))]\r\n init.S2 = rbeta(x, shape1 = a.S2, shape2 = b.S2)\r\n init.C2 = rbeta(x, shape1 = a.C2, shape2 = b.C2)\r\n study.ref_std = rbind(init.S2, init.C2)\r\n rownames(study.ref_std) = c(\"init.S2\", \r\n \"init.C2\")\r\n }\r\n }\r\n }\r\n }\r\n }\r\n if (sco == TRUE) {\r\n study.indep_par = rbind(init.THETA, init.LAMBDA, \r\n init.sigma.alpha, prec.alpha, init.beta)\r\n rownames(study.indep_par) = c(\"init.THETA\", \r\n \"init.LAMBDA\", \"init.sigma.alpha\", \"prec.alpha\", \r\n \"init.beta\")\r\n study.dep_par = cbind(init.alpha, init.S1, \r\n init.C1, init.PI)\r\n colnames(study.dep_par) = cbind(\"init.alpha\", \r\n \"init.S1\", \"init.C1\", \"init.PI\")\r\n }\r\n else {\r\n if (sco == FALSE) {\r\n study.indep_par = rbind(init.THETA, init.sigma.theta, \r\n prec.theta, init.LAMBDA, init.sigma.alpha, \r\n prec.alpha, init.beta)\r\n rownames(study.indep_par) = c(\"init.THETA\", \r\n \"init.sigma.theta\", \"prec.theta\", \"init.LAMBDA\", \r\n \"init.sigma.alpha\", \"prec.alpha\", \"init.beta\")\r\n study.dep_par = cbind(init.alpha, init.theta, \r\n init.S1, init.C1, init.PI)\r\n 
colnames(study.dep_par) = cbind(\"init.alpha\", \r\n \"init.theta\", \"init.S1\", \"init.C1\", \"init.PI\")\r\n }\r\n }\r\n write.table(study.dep_par, file = file.ini, row.names = FALSE)\r\n write(paste(\"______________________________________________________\"), \r\n file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write.table(study.indep_par, file = file.ini, \r\n append = TRUE, col.names = FALSE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"______________________________________________________\"), \r\n file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write.table(study.ref_std, file = file.ini, append = TRUE, \r\n col.names = FALSE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n if (sco == TRUE) {\r\n write(paste(\"The same cut-off value was assumed across each study.\"), \r\n file = file.ini, append = TRUE)\r\n }\r\n }\r\n else {\r\n file.ini = \"Initial values.txt\"\r\n init.LAMBDA = as.vector(Data4[3])\r\n init.sigma.alpha = as.vector(Data4[4])\r\n prec.alpha = as.vector((1/(init.sigma.alpha)^2))\r\n init.THETA = as.vector(Data4[1])\r\n init.sigma.theta = as.vector(Data4[2])\r\n prec.theta = as.vector((1/(init.sigma.theta)^2))\r\n init.beta = as.vector(Data4[5])\r\n init.alpha = Data1[, 1]\r\n init.theta = Data1[, 2]\r\n init.S1 = Data1[, 3]\r\n init.C1 = Data1[, 4]\r\n init.PI = Data1[, 5]\r\n if (GS == TRUE) {\r\n study.ref_std = \"Perfect Reference standard\"\r\n }\r\n else {\r\n if (GS == FALSE) {\r\n if (GS_se == TRUE & GS_sp == FALSE) {\r\n init.S2 = 1\r\n init.C2 = Data5[2, ]\r\n study.ref_std = rbind(init.S2, init.C2)\r\n rownames(study.ref_std) = c(\"Perfect Sensitivity\", \r\n \"init.C2\")\r\n }\r\n else {\r\n if (GS_se == FALSE & GS_sp == TRUE) {\r\n init.S2 = Data5[1, ]\r\n init.C2 = 1\r\n study.ref_std = rbind(init.S2, init.C2)\r\n rownames(study.ref_std) = c(\"init.S2\", \r\n \"Perfect Specificity\")\r\n }\r\n else {\r\n if (GS_se == FALSE & GS_sp == FALSE) {\r\n init.S2 = Data5[1, ]\r\n init.C2 = Data5[2, ]\r\n study.ref_std = rbind(init.S2, init.C2)\r\n rownames(study.ref_std) = c(\"init.S2\", \r\n \"init.C2\")\r\n }\r\n }\r\n }\r\n }\r\n }\r\n if (sco == TRUE) {\r\n study.indep_par = rbind(init.THETA, init.LAMBDA, \r\n init.sigma.alpha, prec.alpha, init.beta)\r\n rownames(study.indep_par) = c(\"init.THETA\", \r\n \"init.LAMBDA\", \"init.sigma.alpha\", \"prec.alpha\", \r\n \"init.beta\")\r\n study.dep_par = cbind(init.alpha, init.S1, \r\n init.C1, init.PI)\r\n colnames(study.dep_par) = cbind(\"init.alpha\", \r\n \"init.S1\", \"init.C1\", \"init.PI\")\r\n }\r\n else {\r\n if (sco == FALSE) {\r\n study.indep_par = rbind(init.THETA, init.sigma.theta, \r\n prec.theta, init.LAMBDA, init.sigma.alpha, \r\n prec.alpha, init.beta)\r\n rownames(study.indep_par) = c(\"init.THETA\", \r\n \"init.sigma.theta\", \"prec.theta\", \"init.LAMBDA\", \r\n \"init.sigma.alpha\", \"prec.alpha\", \"init.beta\")\r\n study.dep_par = cbind(init.alpha, init.theta, \r\n init.S1, init.C1, init.PI)\r\n colnames(study.dep_par) = cbind(\"init.alpha\", \r\n \"init.theta\", \"init.S1\", \"init.C1\", \"init.PI\")\r\n }\r\n }\r\n write.table(study.dep_par, file = file.ini, row.names = FALSE)\r\n write(paste(\"______________________________________________________\"), 
\r\n file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write.table(study.indep_par, file = file.ini, \r\n append = TRUE, col.names = FALSE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"______________________________________________________\"), \r\n file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write.table(study.ref_std, file = file.ini, append = TRUE, \r\n col.names = FALSE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n if (sco == TRUE) {\r\n write(paste(\"The same cut-off value was assumed across each study.\"), \r\n file = file.ini, append = TRUE)\r\n }\r\n }\r\n }\r\n else {\r\n if (sco == TRUE) {\r\n init.sigma.alpha = Data3[2]\r\n prec.alpha = (1/(init.sigma.alpha)^2)\r\n init.THETA = Data3[3]\r\n init.LAMBDA = Data3[1]\r\n init.beta = Data3[4]\r\n init.alpha = Data2[, 1]\r\n init.S1 = Data2[, 2]\r\n init.C1 = Data2[, 3]\r\n init.PI = Data2[, 4]\r\n if (GS == FALSE) {\r\n init.S2 = Data6[1, ]\r\n init.C2 = Data6[2, ]\r\n }\r\n }\r\n else {\r\n if (sco == FALSE) {\r\n init.sigma.theta = Data3[4]\r\n prec.theta = (1/(init.sigma.theta)^2)\r\n init.sigma.alpha = Data3[2]\r\n prec.alpha = (1/(init.sigma.alpha)^2)\r\n init.THETA = Data3[3]\r\n init.LAMBDA = Data3[1]\r\n init.beta = Data3[5]\r\n init.alpha = Data2[, 1]\r\n init.theta = Data2[, 2]\r\n init.S1 = Data2[, 3]\r\n init.C1 = Data2[, 4]\r\n init.PI = Data2[, 5]\r\n if (GS == FALSE) {\r\n init.S2 = Data6[1, ]\r\n init.C2 = Data6[2, ]\r\n }\r\n }\r\n }\r\n }\r\n if (sco == TRUE) {\r\n inits1 = c(0, 0, init.sigma.alpha, prec.alpha, init.THETA, \r\n init.LAMBDA, init.beta)\r\n inits1 = as.matrix(inits1)\r\n rownames(inits1) = c(\"\", \"\", \"init.sigma.alpha\", \r\n \"prec.alpha\", \"init.THETA\", \"init.LAMBDA\", \"init.beta\")\r\n colnames(inits1) = c(\"\")\r\n }\r\n else {\r\n if (sco == FALSE) {\r\n inits1 = c(init.sigma.theta, prec.theta, init.sigma.alpha, \r\n prec.alpha, init.THETA, init.LAMBDA, init.beta)\r\n inits1 = as.matrix(inits1)\r\n rownames(inits1) = c(\"init.sigma.theta\", \"prec.theta\", \r\n \"init.sigma.alpha\", \"prec.alpha\", \"init.THETA\", \r\n \"init.LAMBDA\", \"init.beta\")\r\n colnames(inits1) = c(\"\")\r\n }\r\n }\r\n if (GS == TRUE) {\r\n inits3 = \"Perfect Reference standard\"\r\n }\r\n else {\r\n inits3 = rbind(init.S2, init.C2)\r\n rownames(inits3) = c(\"init.S2\", \"init.C2\")\r\n }\r\n }\r\n else {\r\n if (cond.Ind == TRUE & model == 2) {\r\n a.a1 = param[13:(13 + (x - 1))]\r\n b.a1 = param[(13 + (x)):(13 + (2 * x - 1))]\r\n a.a0 = param[(13 + (2 * x)):(13 + (3 * x - 1))]\r\n b.a0 = param[(13 + (3 * x)):(13 + (4 * x - 1))]\r\n if (first.run == TRUE) {\r\n if (random == TRUE) {\r\n file.ini = \"Random Initial values.txt\"\r\n init.THETA = runif(1, min = a.THETA, max = b.THETA)\r\n init.sigma.theta = runif(1, 1/sqrt(b.disp.theta), \r\n 1/sqrt(a.disp.theta))\r\n prec.theta = 1/(init.sigma.theta)^2\r\n init.LAMBDA = runif(1, min = a.LAMBDA, max = b.LAMBDA)\r\n init.sigma.alpha = runif(1, 1/sqrt(b.disp.alpha), \r\n 1/sqrt(a.disp.alpha))\r\n prec.alpha = 1/(init.sigma.alpha)^2\r\n init.beta = runif(1, min = a.beta, max = b.beta)\r\n init.theta = rnorm(n = studies, mean = init.THETA, \r\n sd = init.sigma.theta)\r\n init.alpha = rnorm(n = studies, mean = init.LAMBDA, 
\r\n sd = init.sigma.alpha)\r\n init.S1 = rbeta(studies, shape1 = 1, shape2 = 1)\r\n init.C1 = rbeta(studies, shape1 = 1, shape2 = 1)\r\n init.PI = rbeta(studies, shape1 = 1, shape2 = 1)\r\n if (GS == FALSE) {\r\n init.a1 = rnorm(x, mean = a.a1, sd = b.a1)\r\n init.a0 = rnorm(x, mean = a.a0, sd = b.a0)\r\n }\r\n study.dep_par = cbind(init.alpha, init.theta, \r\n init.S1, init.C1, init.PI)\r\n colnames(study.dep_par) = cbind(\"init.alpha\", \r\n \"init.theta\", \"init.S1\", \"init.C1\", \"init.PI\")\r\n study.indep_par = rbind(init.THETA, init.sigma.theta, \r\n prec.theta, init.LAMBDA, init.sigma.alpha, \r\n prec.alpha, init.beta)\r\n rownames(study.indep_par) = c(\"init.THETA\", \r\n \"init.sigma.theta\", \"prec.theta\", \"init.LAMBDA\", \r\n \"init.sigma.alpha\", \"prec.alpha\", \"init.beta\")\r\n if (GS == TRUE) {\r\n study.ref_std = \"Perfect Reference standard\"\r\n }\r\n else {\r\n study.ref_std = rbind(init.a1, init.a0)\r\n rownames(study.ref_std) = c(\"init.a1\", \"init.a0\")\r\n }\r\n write.table(study.dep_par, file = file.ini, \r\n row.names = FALSE)\r\n write(paste(\"______________________________________________________\"), \r\n file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write.table(study.indep_par, file = file.ini, \r\n append = TRUE, col.names = FALSE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"______________________________________________________\"), \r\n file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write.table(study.ref_std, file = file.ini, \r\n append = TRUE, col.names = FALSE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n }\r\n else {\r\n file.ini = \"Initial values.txt\"\r\n init.LAMBDA = as.vector(Data4[3])\r\n init.sigma.alpha = as.vector(Data4[4])\r\n prec.alpha = as.vector((1/(init.sigma.alpha)^2))\r\n init.THETA = as.vector(Data4[1])\r\n init.sigma.theta = as.vector(Data4[2])\r\n prec.theta = as.vector((1/(init.sigma.theta)^2))\r\n init.beta = as.vector(Data4[5])\r\n init.alpha = Data1[, 1]\r\n init.theta = Data1[, 2]\r\n init.S1 = Data1[, 3]\r\n init.C1 = Data1[, 4]\r\n init.PI = Data1[, 5]\r\n if (GS == FALSE) {\r\n init.a1 = Data5[1, ]\r\n init.a0 = Data5[2, ]\r\n }\r\n study.indep_par = rbind(init.THETA, init.sigma.theta, \r\n prec.theta, init.LAMBDA, init.sigma.alpha, \r\n prec.alpha, init.beta)\r\n rownames(study.indep_par) = c(\"init.THETA\", \r\n \"init.sigma.theta\", \"prec.theta\", \"init.LAMBDA\", \r\n \"init.sigma.alpha\", \"prec.alpha\", \"init.beta\")\r\n study.dep_par = cbind(init.alpha, init.theta, \r\n init.S1, init.C1, init.PI)\r\n colnames(study.dep_par) = cbind(\"init.alpha\", \r\n \"init.theta\", \"init.S1\", \"init.C1\", \"init.PI\")\r\n if (GS == TRUE) {\r\n study.ref_std = \"Perfect Reference standard\"\r\n }\r\n else {\r\n study.ref_std = rbind(init.a1, init.a0)\r\n rownames(study.ref_std) = c(\"init.a1\", \"init.a0\")\r\n }\r\n write.table(study.dep_par, file = file.ini, \r\n row.names = FALSE)\r\n write(paste(\"______________________________________________________\"), \r\n file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write.table(study.indep_par, file = file.ini, \r\n append = TRUE, 
col.names = FALSE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"______________________________________________________\"), \r\n file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write.table(study.ref_std, file = file.ini, \r\n append = TRUE, col.names = FALSE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n }\r\n }\r\n else {\r\n init.sigma.theta = Data3[4]\r\n prec.theta = (1/(init.sigma.theta)^2)\r\n init.sigma.alpha = Data3[2]\r\n prec.alpha = (1/(init.sigma.alpha)^2)\r\n init.THETA = Data3[3]\r\n init.LAMBDA = Data3[1]\r\n init.beta = Data3[5]\r\n init.alpha = Data2[, 1]\r\n init.theta = Data2[, 2]\r\n init.S1 = Data2[, 3]\r\n init.C1 = Data2[, 4]\r\n init.PI = Data2[, 5]\r\n if (GS == FALSE) {\r\n init.a1 = Data6[1, ]\r\n init.a0 = Data6[2, ]\r\n }\r\n }\r\n inits1 = c(init.sigma.theta, prec.theta, init.sigma.alpha, \r\n prec.alpha, init.THETA, init.LAMBDA, init.beta)\r\n inits1 = as.matrix(inits1)\r\n rownames(inits1) = c(\"init.sigma.theta\", \"prec.theta\", \r\n \"init.sigma.alpha\", \"prec.alpha\", \"init.THETA\", \r\n \"init.LAMBDA\", \"init.beta\")\r\n colnames(inits1) = c(\"\")\r\n if (GS == TRUE) {\r\n inits3 = \"Perfect Reference standard\"\r\n }\r\n else {\r\n inits3 = rbind(init.a1, init.a0)\r\n rownames(inits3) = c(\"init.a1\", \"init.a0\")\r\n }\r\n }\r\n else {\r\n if (cond.Ind == FALSE & model == 2) {\r\n a.d1 = param[13]\r\n b.d1 = param[14]\r\n a.d0 = param[15]\r\n b.d0 = param[16]\r\n a.a1 = param[17:(17 + (x - 1))]\r\n b.a1 = param[(17 + (x)):(17 + (2 * x - 1))]\r\n a.a0 = param[(17 + (2 * x)):(17 + (3 * x - 1))]\r\n b.a0 = param[(17 + (3 * x)):(17 + (4 * x - 1))]\r\n a.b1 = param[(17 + (4 * x)):(17 + (5 * x - 1))]\r\n b.b1 = param[(17 + (5 * x)):(17 + (6 * x - 1))]\r\n a.b0 = param[(17 + (6 * x)):(17 + (7 * x - 1))]\r\n b.b0 = param[(17 + (7 * x)):(17 + (8 * x - 1))]\r\n if (first.run == TRUE) {\r\n if (random == TRUE) {\r\n file.ini = \"Random Initial values.txt\"\r\n init.THETA = runif(1, min = a.THETA, max = b.THETA)\r\n init.sigma.theta = runif(1, 1/sqrt(b.disp.theta), \r\n 1/sqrt(a.disp.theta))\r\n prec.theta = 1/(init.sigma.theta)^2\r\n init.LAMBDA = runif(1, min = a.LAMBDA, max = b.LAMBDA)\r\n init.sigma.alpha = runif(1, 1/sqrt(b.disp.alpha), \r\n 1/sqrt(a.disp.alpha))\r\n prec.alpha = 1/(init.sigma.alpha)^2\r\n init.beta = runif(1, min = a.beta, max = b.beta)\r\n init.theta = rnorm(n = studies, mean = init.THETA, \r\n sd = init.sigma.theta)\r\n init.alpha = rnorm(n = studies, mean = init.LAMBDA, \r\n sd = init.sigma.alpha)\r\n init.S1 = rbeta(studies, shape1 = 1, shape2 = 1)\r\n init.C1 = rbeta(studies, shape1 = 1, shape2 = 1)\r\n init.PI = rbeta(studies, shape1 = 1, shape2 = 1)\r\n if (GS == FALSE) {\r\n init.a1 = rnorm(x, mean = a.a1, sd = b.a1)\r\n init.a0 = rnorm(x, mean = a.a0, sd = b.a0)\r\n init.b1 = runif(x, min = a.b1, max = b.b1)\r\n init.b0 = runif(x, min = a.b0, max = b.b0)\r\n init.d1 = runif(1, min = a.d1, max = b.d1)\r\n init.d0 = runif(1, min = a.d0, max = b.d0)\r\n }\r\n study.dep_par = cbind(init.alpha, init.theta, \r\n init.S1, init.C1, init.PI)\r\n colnames(study.dep_par) = cbind(\"init.alpha\", \r\n \"init.theta\", \"init.S1\", \"init.C1\", \"init.PI\")\r\n if (GS == TRUE) {\r\n study.ref_std = \"Perfect Reference standard\"\r\n study.indep_par = rbind(init.THETA, init.sigma.theta, \r\n 
prec.theta, init.LAMBDA, init.sigma.alpha, \r\n prec.alpha, init.beta)\r\n rownames(study.indep_par) = c(\"init.THETA\", \r\n \"init.sigma.theta\", \"prec.theta\", \"init.LAMBDA\", \r\n \"init.sigma.alpha\", \"prec.alpha\", \"init.beta\")\r\n }\r\n else {\r\n study.indep_par = rbind(init.THETA, init.sigma.theta, \r\n prec.theta, init.LAMBDA, init.sigma.alpha, \r\n prec.alpha, init.beta, init.d1, init.d0)\r\n rownames(study.indep_par) = c(\"init.THETA\", \r\n \"init.sigma.theta\", \"prec.theta\", \"init.LAMBDA\", \r\n \"init.sigma.alpha\", \"prec.alpha\", \"init.beta\", \r\n \"init.d1\", \"init.d0\")\r\n study.ref_std = rbind(init.a1, init.a0, \r\n init.b1, init.b0)\r\n rownames(study.ref_std) = c(\"init.a1\", \r\n \"init.a0\", \"init.b1\", \"init.b0\")\r\n }\r\n write.table(study.dep_par, file = file.ini, \r\n row.names = FALSE)\r\n write(paste(\"______________________________________________________\"), \r\n file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write.table(study.indep_par, file = file.ini, \r\n append = TRUE, col.names = FALSE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"______________________________________________________\"), \r\n file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write.table(study.ref_std, file = file.ini, \r\n append = TRUE, col.names = FALSE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n }\r\n else {\r\n file.ini = \"Initial values.txt\"\r\n init.LAMBDA = as.vector(Data4[3])\r\n init.sigma.alpha = as.vector(Data4[4])\r\n prec.alpha = as.vector((1/(init.sigma.alpha)^2))\r\n init.THETA = as.vector(Data4[1])\r\n init.sigma.theta = as.vector(Data4[2])\r\n prec.theta = as.vector((1/(init.sigma.theta)^2))\r\n init.beta = as.vector(Data4[5])\r\n init.alpha = Data1[, 1]\r\n init.theta = Data1[, 2]\r\n init.S1 = Data1[, 3]\r\n init.C1 = Data1[, 4]\r\n init.PI = Data1[, 5]\r\n if (GS == FALSE) {\r\n init.d1 = as.vector(Data4[6])\r\n init.d0 = as.vector(Data4[7])\r\n init.a1 = Data5[1, ]\r\n init.a0 = Data5[2, ]\r\n init.b1 = Data5[3, ]\r\n init.b0 = Data5[4, ]\r\n }\r\n study.dep_par = cbind(init.alpha, init.theta, \r\n init.S1, init.C1, init.PI)\r\n colnames(study.dep_par) = cbind(\"init.alpha\", \r\n \"init.theta\", \"init.S1\", \"init.C1\", \"init.PI\")\r\n if (GS == TRUE) {\r\n study.ref_std = \"Perfect Reference standard\"\r\n study.indep_par = rbind(init.THETA, init.sigma.theta, \r\n prec.theta, init.LAMBDA, init.sigma.alpha, \r\n prec.alpha, init.beta)\r\n rownames(study.indep_par) = c(\"init.THETA\", \r\n \"init.sigma.theta\", \"prec.theta\", \"init.LAMBDA\", \r\n \"init.sigma.alpha\", \"prec.alpha\", \"init.beta\")\r\n }\r\n else {\r\n study.indep_par = rbind(init.THETA, init.sigma.theta, \r\n prec.theta, init.LAMBDA, init.sigma.alpha, \r\n prec.alpha, init.beta, init.d1, init.d0)\r\n rownames(study.indep_par) = c(\"init.THETA\", \r\n \"init.sigma.theta\", \"prec.theta\", \"init.LAMBDA\", \r\n \"init.sigma.alpha\", \"prec.alpha\", \"init.beta\", \r\n \"init.d1\", \"init.d0\")\r\n study.ref_std = rbind(init.a1, init.a0, \r\n init.b1, init.b0)\r\n rownames(study.ref_std) = c(\"init.a1\", \r\n \"init.a0\", \"init.b1\", \"init.b0\")\r\n }\r\n write.table(study.dep_par, file = file.ini, \r\n row.names = FALSE)\r\n 
write(paste(\"______________________________________________________\"), \r\n file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write.table(study.indep_par, file = file.ini, \r\n append = TRUE, col.names = FALSE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"______________________________________________________\"), \r\n file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write.table(study.ref_std, file = file.ini, \r\n append = TRUE, col.names = FALSE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n write(paste(\"\"), file = file.ini, append = TRUE)\r\n }\r\n }\r\n else {\r\n init.sigma.theta = Data3[4]\r\n prec.theta = (1/(init.sigma.theta)^2)\r\n init.sigma.alpha = Data3[2]\r\n prec.alpha = (1/(init.sigma.alpha)^2)\r\n init.THETA = Data3[3]\r\n init.LAMBDA = Data3[1]\r\n init.beta = Data3[5]\r\n init.alpha = Data2[, 1]\r\n init.theta = Data2[, 2]\r\n init.S1 = Data2[, 3]\r\n init.C1 = Data2[, 4]\r\n init.PI = Data2[, 5]\r\n if (GS == FALSE) {\r\n init.d1 = Data3[6]\r\n init.d0 = Data3[7]\r\n init.a1 = Data6[1, ]\r\n init.a0 = Data6[2, ]\r\n init.b1 = Data6[3, ]\r\n init.b0 = Data6[4, ]\r\n }\r\n }\r\n if (GS == TRUE) {\r\n inits1 = c(init.sigma.theta, prec.theta, init.sigma.alpha, \r\n prec.alpha, init.THETA, init.LAMBDA, init.beta)\r\n inits1 = as.matrix(inits1)\r\n inits3 = \"Perfect Reference standard\"\r\n }\r\n else {\r\n inits1 = c(init.sigma.theta, prec.theta, init.sigma.alpha, \r\n prec.alpha, init.THETA, init.LAMBDA, init.beta, \r\n init.d1, init.d0)\r\n inits1 = as.matrix(inits1)\r\n inits3 = rbind(init.a1, init.a0, init.b1, init.b0)\r\n rownames(inits3) = c(\"init.a1\", \"init.a0\", \r\n \"init.b1\", \"init.b0\")\r\n }\r\n rownames(inits1) = c(\"init.sigma.theta\", \"prec.theta\", \r\n \"init.sigma.alpha\", \"prec.alpha\", \"init.THETA\", \r\n \"init.LAMBDA\", \"init.beta\", \"init.d1\", \"init.d0\")\r\n colnames(inits1) = c(\"\")\r\n }\r\n }\r\n }\r\n if (sco == TRUE) {\r\n inits2 = cbind(init.alpha, 0, init.S1, init.C1, init.PI)\r\n colnames(inits2) = c(\"init.alpha\", \"\", \"init.S1\", \"init.C1\", \r\n \"init.PI\")\r\n rownames(inits2) = seq(1, studies)\r\n }\r\n else {\r\n if (sco == FALSE) {\r\n inits2 = cbind(init.alpha, init.theta, init.S1, init.C1, \r\n init.PI)\r\n colnames(inits2) = c(\"init.alpha\", \"init.theta\", \r\n \"init.S1\", \"init.C1\", \"init.PI\")\r\n rownames(inits2) = seq(1, studies)\r\n }\r\n }\r\n inits = list(inits1, inits2, inits3)\r\n return(inits)\r\n}\r\n" }, { "alpha_fraction": 0.368979275226593, "alphanum_fraction": 0.3895946443080902, "avg_line_length": 50.766639709472656, "blob_id": "4fc0419fbf5d24c3d84c76d879691388de68c5fc", "content_id": "4c6ecfc4c39f8452b56ba1c0c82caa5993296b89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 242683, "license_type": "no_license", "max_line_length": 223, "num_lines": 4688, "path": "/src/R/HSROC/R/HSROCSummary.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "HSROCSummary <-\nfunction (data, burn_in = 0, iter.keep = NULL, Thin = 1, sub_rs = NULL, \n point_estimate = c(\"median\", \"mean\"), path = getwd(), chain = NULL, \n tv = NULL, digit = 6, print_plot = FALSE, png.too=TRUE) \n{\n setwd(path)\n # keep a list of image names to png and PDF 
paths\n image.list <- list()\n if (missing(data)) \n stop(\"You must provide a valid 'data' argument\", call. = FALSE)\n N = length(data[, 1])\n if (burn_in < 0) {\n cat(\"The 'burn_in' argument must be greater or equal than zero. \\n\")\n stop(\"Please respecify and call HSROCSummary() again.\\n\")\n }\n if (is.null(iter.keep) == FALSE) {\n if (iter.keep < 0) {\n cat(\"The 'iter.keep' argument must be greater or equal than zero. \\n\")\n stop(\"Please respecify and call HSROCSummary() again.\\n\")\n }\n }\n if (Thin < 1) {\n cat(\"The 'Thin' argument must be greater or equal than 1. \\n\")\n stop(\"Please respecify and call HSROCSummary() again.\\n\")\n }\n if (is.null(sub_rs) == TRUE) {\n sub_rs = list(1, 1:N)\n }\n if (sub_rs[[1]] != (length(sub_rs) - 1)) {\n cat(paste(\"The value of the first element of 'sub_rs' (sub_rs[[1]] = \", \n sub_rs[[1]], \" ) does not match the number of remaining elements (length(sub_rs[[2:\", \n length(sub_rs), \"]])) = \", length(2:length(sub_rs)), \n \"\\n\", sep = \"\"))\n stop(\"Please respecify and call HSROCSummary() again.\\n\")\n }\n if (is.logical(print_plot) == FALSE) {\n cat(\"The 'print_plot' argument must be a logical object. \\n\")\n stop(\"Please respecify and call HSROCSummary() again.\\n\")\n }\n file.prior = \"Prior.information.txt\"\n prior_dist_PI = \"beta\"\n if (is.null(chain) == FALSE) {\n setwd(chain[[1]])\n nb_chains = length(chain)\n }\n else {\n nb_chains = 1\n }\n point_estimate = match.arg(point_estimate)\n prior.exist = file.exists(file.prior)\n if (prior.exist == FALSE) {\n stop(paste(\"Make sure the \\\"\", file.prior, \"\\\" file created by the 'HSROC' function is in the \\\"\", \n path, \"\\\" working directory. \\n\", sep = \"\"))\n }\n prior = read.table(file.prior, header = TRUE)\n model = read.table(\"model.txt\", header = FALSE)\n Gold_se = (read.table(\"S2.txt\", header = FALSE) == 1)\n Gold_sp = (read.table(\"C2.txt\", header = FALSE) == 1)\n prior_sig_t = read.table(\"Prior on sigma_theta.txt\", header = FALSE)\n prior_sig_a = read.table(\"Prior on sigma_alpha.txt\", header = FALSE)\n if (length(prior[, 1]) == 7) {\n Gold_Std = TRUE\n condInd = TRUE\n }\n else {\n if (length(prior[, 1]) > 7 & length(prior[, 1]) <= 7 + \n 2 * sub_rs[[1]]) {\n Gold_Std = FALSE\n condInd = TRUE\n }\n else {\n if (length(prior[, 1]) > 7 + 2 * sub_rs[[1]]) {\n Gold_Std = FALSE\n condInd = FALSE\n }\n }\n }\n if (is.null(tv) == FALSE) {\n real_life = FALSE\n if (sum(dim(tv[[1]])) != N + 5) {\n cat(paste(\"The true value for the within-study parameters were misspecified. Make sure the ordering described in the help file is preserved. \\n\"))\n stop(\"Please respecify and call HSROCSummary() again.\\n\")\n }\n if (length(tv[[2]]) != 7) {\n cat(paste(\"The true value for the between-study parameters were misspecified. Make sure the ordering described in the help file is preserved. \\n\"))\n stop(\"Please respecify and call HSROCSummary() again.\\n\")\n }\n if (Gold_Std == FALSE) {\n if (sum(dim(tv[[3]])) != sub_rs[[1]] + 2) {\n cat(paste(\"The true value for the test under evaluation were misspecified. Make sure the ordering described in the help file is preserved. 
\\n\"))\n stop(\"Please respecify and call HSROCSummary() again.\\n\")\n }\n }\n }\n else {\n real_life = TRUE\n }\n beta.a = prior[1, 1]\n beta.b = prior[1, 2]\n prior.THETA.lower = prior[2, 1]\n prior.THETA.upper = prior[2, 2]\n prior.LAMBDA.lower = prior[3, 1]\n prior.LAMBDA.upper = prior[3, 2]\n l.disp.alpha = prior[4, 1]\n u.disp.alpha = prior[4, 2]\n l.disp.theta = prior[5, 1]\n u.disp.theta = prior[5, 2]\n low.pi = prior[6, 1]\n up.pi = prior[6, 2]\n low.rj = prior[7, 1]\n up.rj = prior[7, 2]\n SCO = FALSE\n rs.length = sub_rs[[1]]\n if (condInd == TRUE & Gold_Std == FALSE & model == 1) {\n if (Gold_se == TRUE & Gold_sp == FALSE) {\n low.sp = prior[8:(8 + (rs.length - 1)), 1]\n up.sp = prior[8:(8 + (rs.length - 1)), 2]\n prior_dist_S2 = NULL\n prior_dist_C2 = \"beta\"\n }\n else {\n if (Gold_sp == TRUE & Gold_se == FALSE) {\n low.se = prior[8:(8 + (rs.length - 1)), 1]\n up.se = prior[8:(8 + (rs.length - 1)), 2]\n prior_dist_C2 = NULL\n prior_dist_S2 = \"beta\"\n }\n else {\n low.se = prior[8:(8 + (rs.length - 1)), 1]\n up.se = prior[8:(8 + (rs.length - 1)), 2]\n low.sp = prior[(8 + rs.length):(8 + (2 * rs.length - \n 1)), 1]\n up.sp = prior[(8 + rs.length):(8 + (2 * rs.length - \n 1)), 2]\n prior_dist_S2 = \"beta\"\n prior_dist_C2 = \"beta\"\n }\n }\n }\n else {\n if (condInd == TRUE & Gold_Std == FALSE & model == 2) {\n mean.a1 = prior[8:(8 + (rs.length - 1)), 1]\n sd.a1 = prior[8:(8 + (rs.length - 1)), 2]\n mean.a0 = prior[(8 + rs.length):(8 + (2 * rs.length - \n 1)), 1]\n sd.a0 = prior[(8 + rs.length):(8 + (2 * rs.length - \n 1)), 2]\n }\n else {\n if (condInd == FALSE) {\n low.d1 = prior[8, 1]\n up.d1 = prior[8, 2]\n low.d0 = prior[9, 1]\n up.d0 = prior[9, 2]\n mean.a1 = prior[10:(10 + (rs.length - 1)), 1]\n sd.a1 = prior[10:(10 + (rs.length - 1)), 2]\n mean.a0 = prior[(10 + rs.length):(10 + (2 * rs.length - \n 1)), 1]\n sd.a0 = prior[(10 + rs.length):(10 + (2 * rs.length - \n 1)), 2]\n low.b1 = prior[(10 + (2 * rs.length)):(10 + (3 * \n rs.length - 1)), 1]\n up.b1 = prior[(10 + (2 * rs.length)):(10 + (3 * \n rs.length - 1)), 2]\n low.b0 = prior[(10 + (3 * rs.length)):(10 + (4 * \n rs.length - 1)), 1]\n up.b0 = prior[(10 + (3 * rs.length)):(10 + (4 * \n rs.length - 1)), 2]\n }\n }\n }\n if (prior_dist_PI == \"beta\") {\n alpha.PI = beta.parameter(low = low.pi, up = up.pi)[1, \n ]\n beta.PI = beta.parameter(low = low.pi, up = up.pi)[2, \n ]\n }\n else {\n if (prior_dist_PI == \"uniform\") {\n alpha.PI = low.pi\n beta.PI = up.pi\n }\n }\n if (model == 1) {\n if (Gold_se == TRUE) {\n Sens2.alpha = Sens2.beta = NULL\n }\n else {\n Sens2.alpha = beta.parameter(low = low.se, up = up.se)[1, \n ]\n Sens2.beta = beta.parameter(low = low.se, up = up.se)[2, \n ]\n }\n }\n if (model == 1) {\n if (Gold_sp == TRUE) {\n Spec2.alpha = Spec2.beta = NULL\n }\n else {\n Spec2.alpha = beta.parameter(low = low.sp, up = up.sp)[1, \n ]\n Spec2.beta = beta.parameter(low = low.sp, up = up.sp)[2, \n ]\n }\n }\n file.theta = \"theta.txt\"\n file.alpha = \"alpha.txt\"\n file.capital.THETA = \"capital_THETA.txt\"\n file.LAMBDA = \"LAMBDA.txt\"\n file.beta = \"beta.txt\"\n file.PI = \"PI.txt\"\n file.sigma.alpha = \"sigma.alpha.txt\"\n file.sigma.theta = \"sigma.theta.txt\"\n file.Sens2 = \"Sens2.txt\"\n file.Spec2 = \"Spec2.txt\"\n file.Sens1 = \"Sens1.txt\"\n file.Spec1 = \"Spec1.txt\"\n file.result = \"estimate.txt\"\n file.C_overall = \"C_overall.txt\"\n file.S_overall = \"S_overall.txt\"\n file.d1 = \"d1.txt\"\n file.d0 = \"d0.txt\"\n file.a1 = \"a1.txt\"\n file.a0 = \"a0.txt\"\n file.b1 = \"b1.txt\"\n 
file.b0 = \"b0.txt\"\n numb.iter = scan(\"iter.txt\", quiet = TRUE)\n if (is.null(iter.keep) == TRUE) {\n iter.num = numb.iter\n }\n else {\n iter.num = iter.keep\n }\n if ((iter.num - burn_in)/Thin < 100) \n stop(\"You don't have enough iterations to estimate the MC error. After taking into account the \\\"burn in\\\" and \\\"thinning interval\\\", you need at least 100 iterations to proceed.\")\n if (is.null(chain) == TRUE) {\n N1 = N * iter.num\n t1 <- file(\"alpha.txt\", \"rb\")\n t2 <- file(\"theta.txt\", \"rb\")\n t3 <- file(\"PI.txt\", \"rb\")\n t4 <- file(\"Sens1.txt\", \"rb\")\n t5 <- file(\"Spec1.txt\", \"rb\")\n alpha_bin = readBin(t1, double(), n = N1, endian = \"little\")\n theta_bin = readBin(t2, double(), n = N1, endian = \"little\")\n pi_bin = readBin(t3, double(), n = N1, endian = \"little\")\n Sens1_bin = readBin(t4, double(), n = N1, endian = \"little\")\n Spec1_bin = readBin(t5, double(), n = N1, endian = \"little\")\n alpha = matrix(0, ncol = N, nrow = iter.num)\n theta = matrix(0, ncol = N, nrow = iter.num)\n PI = matrix(0, ncol = N, nrow = iter.num)\n S1 = matrix(0, ncol = N, nrow = iter.num)\n C1 = matrix(0, ncol = N, nrow = iter.num)\n for (i in 1:N) {\n sequence = seq(i, iter.num * N, N)\n alpha[, i] = alpha_bin[sequence]\n theta[, i] = theta_bin[sequence]\n PI[, i] = pi_bin[sequence]\n S1[, i] = Sens1_bin[sequence]\n C1[, i] = Spec1_bin[sequence]\n }\n close(t1)\n close(t2)\n close(t3)\n close(t4)\n close(t5)\n t6 <- file(\"capital_THETA.txt\", \"rb\")\n THETA = readBin(t6, double(), n = iter.num, endian = \"little\")\n close(t6)\n t7 <- file(\"LAMBDA.txt\", \"rb\")\n LAMBDA = readBin(t7, double(), n = iter.num, endian = \"little\")\n close(t7)\n t8 <- file(\"beta.txt\", \"rb\")\n beta = readBin(t8, double(), n = iter.num, endian = \"little\")\n close(t8)\n t9 <- file(\"Sens1_new.txt\", \"rb\")\n S1_new = readBin(t9, double(), n = iter.num, endian = \"little\")\n close(t9)\n t10 <- file(\"Spec1_new.txt\", \"rb\")\n C1_new = readBin(t10, double(), n = iter.num, endian = \"little\")\n close(t10)\n t11 <- file(\"sigma.alpha.txt\", \"rb\")\n sigma.alpha = readBin(t11, double(), n = iter.num, endian = \"little\")\n close(t11)\n t12 <- file(\"sigma.theta.txt\", \"rb\")\n sigma.theta = readBin(t12, double(), n = iter.num, endian = \"little\")\n close(t12)\n t14 <- file(\"S_overall.txt\", \"rb\")\n S_overall = readBin(t14, double(), n = iter.num, endian = \"little\")\n close(t14)\n t15 <- file(\"C_overall.txt\", \"rb\")\n C_overall = readBin(t15, double(), n = iter.num, endian = \"little\")\n close(t15)\n total = iter.num\n q = burn_in\n alpha = alpha[(q + 1):total, ]\n THETA = THETA[(q + 1):total]\n LAMBDA = LAMBDA[(q + 1):total]\n beta = beta[(q + 1):total]\n PI = PI[(q + 1):total, ]\n sigma.alpha = sigma.alpha[(q + 1):total]\n S1 = S1[(q + 1):total, ]\n C1 = C1[(q + 1):total, ]\n S1_new = S1_new[(q + 1):total]\n C1_new = C1_new[(q + 1):total]\n C_overall = C_overall[(q + 1):total]\n S_overall = S_overall[(q + 1):total]\n theta = theta[(q + 1):total, ]\n sigma.theta = sigma.theta[(q + 1):total]\n taille = length((q + 1):total)\n thin.interval = Thin\n thin = seq(1, taille, by = thin.interval)\n alpha = as.matrix(alpha)\n alpha = alpha[thin, ]\n THETA = THETA[thin]\n LAMBDA = LAMBDA[thin]\n beta = beta[thin]\n PI = as.matrix(PI)\n PI = PI[thin, ]\n sigma.alpha = sigma.alpha[thin]\n S1 = as.matrix(S1)\n S1 = S1[thin, ]\n C1 = as.matrix(C1)\n C1 = C1[thin, ]\n S1_new = S1_new[thin]\n C1_new = C1_new[thin]\n C_overall = C_overall[thin]\n S_overall = S_overall[thin]\n theta 
= as.matrix(theta)\n theta = theta[thin, ]\n sigma.theta = sigma.theta[thin]\n if (condInd == TRUE & Gold_Std == FALSE & model == 1) {\n if (Gold_se == TRUE & Gold_sp == FALSE) {\n C2 = read.table(file.Spec2)\n C2 = C2[q:total, ]\n C2 = as.matrix(C2)\n C2 = C2[thin, ]\n }\n else {\n if (Gold_sp == TRUE & Gold_se == FALSE) {\n S2 = read.table(file.Sens2)\n S2 = S2[(q + 1):total, ]\n S2 = as.matrix(S2)\n S2 = S2[thin, ]\n }\n else {\n N2 = rs.length * iter.num\n t16 <- file(\"Sens2.txt\", \"rb\")\n t17 <- file(\"Spec2.txt\", \"rb\")\n Sens2_bin = readBin(t16, double(), n = N1, \n endian = \"little\")\n Spec2_bin = readBin(t17, double(), n = N1, \n endian = \"little\")\n S2 = matrix(0, ncol = rs.length, nrow = iter.num)\n C2 = matrix(0, ncol = rs.length, nrow = iter.num)\n for (j in 1:rs.length) {\n sequence = seq(j, iter.num * rs.length, rs.length)\n S2[, j] = Sens2_bin[sequence]\n C2[, j] = Spec2_bin[sequence]\n }\n close(t16)\n close(t17)\n S2 = S2[(q + 1):total, ]\n C2 = C2[(q + 1):total, ]\n S2 = as.matrix(S2)\n C2 = as.matrix(C2)\n S2 = S2[thin, ]\n C2 = C2[thin, ]\n }\n }\n }\n else {\n if (condInd == TRUE & Gold_Std == FALSE & model == \n 2) {\n a1 = read.table(file.a1)\n a0 = read.table(file.a0)\n S2 = read.table(file.Sens2)\n C2 = read.table(file.Spec2)\n a1 = a1[(q + 1):total, ]\n a0 = a0[(q + 1):total, ]\n S2 = S2[(q + 1):total, ]\n C2 = C2[(q + 1):total, ]\n a1 = as.matrix(a1)\n a0 = as.matrix(a0)\n a1 = a1[thin, ]\n a0 = a0[thin, ]\n S2 = as.matrix(S2)\n C2 = as.matrix(C2)\n S2 = S2[thin, ]\n C2 = C2[thin, ]\n }\n else {\n if (condInd == FALSE) {\n d1 = read.table(file.d1)\n d0 = read.table(file.d0)\n a1 = read.table(file.a1)\n a0 = read.table(file.a0)\n b1 = read.table(file.b1)\n b0 = read.table(file.b0)\n S2 = read.table(file.Sens2)\n C2 = read.table(file.Spec2)\n d1 = d1[(q + 1):total, ]\n d0 = d0[(q + 1):total, ]\n a1 = a1[(q + 1):total, ]\n a0 = a0[(q + 1):total, ]\n b1 = b1[(q + 1):total, ]\n b0 = b0[(q + 1):total, ]\n S2 = S2[(q + 1):total, ]\n C2 = C2[(q + 1):total, ]\n d1 = as.matrix(d1)\n d0 = as.matrix(d0)\n a1 = as.matrix(a1)\n a0 = as.matrix(a0)\n b1 = as.matrix(b1)\n b0 = as.matrix(b0)\n S2 = as.matrix(S2)\n C2 = as.matrix(C2)\n d1 = d1[thin, ]\n d0 = d0[thin, ]\n a1 = a1[thin, ]\n a0 = a0[thin, ]\n b1 = b1[thin, ]\n b0 = b0[thin, ]\n S2 = S2[thin, ]\n C2 = C2[thin, ]\n }\n }\n }\n }\n else {\n if (is.null(chain) == FALSE) {\n K = length(chain)\n theta = alpha = THETA = LAMBDA = beta = PI = sigma.alpha = sigma.theta = S1 = C1 = S1_new = C1_new = S2 = C2 = S_overall = C_overall = a1 = a0 = b1 = b0 = d1 = d0 = numeric()\n for (k in 1:K) {\n setwd(chain[[k]])\n N1 = N * iter.num\n t1 <- file(\"alpha.txt\", \"rb\")\n t2 <- file(\"theta.txt\", \"rb\")\n t3 <- file(\"PI.txt\", \"rb\")\n t4 <- file(\"Sens1.txt\", \"rb\")\n t5 <- file(\"Spec1.txt\", \"rb\")\n alpha_bin = readBin(t1, double(), n = N1, endian = \"little\")\n theta_bin = readBin(t2, double(), n = N1, endian = \"little\")\n pi_bin = readBin(t3, double(), n = N1, endian = \"little\")\n Sens1_bin = readBin(t4, double(), n = N1, endian = \"little\")\n Spec1_bin = readBin(t5, double(), n = N1, endian = \"little\")\n a = matrix(0, ncol = N, nrow = iter.num)\n t = matrix(0, ncol = N, nrow = iter.num)\n p = matrix(0, ncol = N, nrow = iter.num)\n S.1 = matrix(0, ncol = N, nrow = iter.num)\n C.1 = matrix(0, ncol = N, nrow = iter.num)\n for (i in 1:N) {\n sequence = seq(i, iter.num * N, N)\n a[, i] = alpha_bin[sequence]\n t[, i] = theta_bin[sequence]\n p[, i] = pi_bin[sequence]\n S.1[, i] = Sens1_bin[sequence]\n C.1[, 
i] = Spec1_bin[sequence]\n }\n close(t1)\n close(t2)\n close(t3)\n close(t4)\n close(t5)\n t6 <- file(\"capital_THETA.txt\", \"rb\")\n T = readBin(t6, double(), n = iter.num, endian = \"little\")\n close(t6)\n t7 <- file(\"LAMBDA.txt\", \"rb\")\n L = readBin(t7, double(), n = iter.num, endian = \"little\")\n close(t7)\n t8 <- file(\"beta.txt\", \"rb\")\n b = readBin(t8, double(), n = iter.num, endian = \"little\")\n close(t8)\n t9 <- file(\"Sens1_new.txt\", \"rb\")\n S.1_new = readBin(t9, double(), n = iter.num, \n endian = \"little\")\n close(t9)\n t10 <- file(\"Spec1_new.txt\", \"rb\")\n C.1_new = readBin(t10, double(), n = iter.num, \n endian = \"little\")\n close(t10)\n t11 <- file(\"sigma.alpha.txt\", \"rb\")\n sig.a = readBin(t11, double(), n = iter.num, \n endian = \"little\")\n close(t11)\n t12 <- file(\"sigma.theta.txt\", \"rb\")\n sig.t = readBin(t12, double(), n = iter.num, \n endian = \"little\")\n close(t12)\n t14 <- file(\"S_overall.txt\", \"rb\")\n S.ov = readBin(t14, double(), n = iter.num, endian = \"little\")\n close(t14)\n t15 <- file(\"C_overall.txt\", \"rb\")\n C.ov = readBin(t15, double(), n = iter.num, endian = \"little\")\n close(t15)\n total = iter.num\n q = burn_in\n a = a[(q + 1):total, ]\n T = T[(q + 1):total]\n L = L[(q + 1):total]\n b = b[(q + 1):total]\n p = p[(q + 1):total, ]\n sig.a = sig.a[(q + 1):total]\n S.1 = S.1[(q + 1):total, ]\n C.1 = C.1[(q + 1):total, ]\n S.1_new = S.1_new[(q + 1):total]\n C.1_new = C.1_new[(q + 1):total]\n C.ov = C.ov[(q + 1):total]\n S.ov = S.ov[(q + 1):total]\n t = t[(q + 1):total, ]\n sig.t = sig.t[(q + 1):total]\n taille = length((q + 1):total)\n thin.interval = Thin\n thin = seq(1, taille, by = thin.interval)\n a = as.matrix(a)\n a = a[thin, ]\n T = T[thin]\n L = L[thin]\n b = b[thin]\n p = as.matrix(p)\n p = p[thin, ]\n sig.a = sig.a[thin]\n S.1 = as.matrix(S.1)\n S.1 = S.1[thin, ]\n C.1 = as.matrix(C.1)\n C.1 = C.1[thin, ]\n S.1_new = S.1_new[thin]\n C.1_new = C.1_new[thin]\n C.ov = C.ov[thin]\n S.ov = S.ov[thin]\n t = as.matrix(t)\n t = t[thin, ]\n sig.t = sig.t[thin]\n alpha = rbind(alpha, as.matrix(a))\n THETA = rbind(THETA, as.matrix(T))\n LAMBDA = rbind(LAMBDA, as.matrix(L))\n beta = rbind(beta, as.matrix(b))\n PI = rbind(PI, as.matrix(p))\n sigma.alpha = rbind(sigma.alpha, as.matrix(sig.a))\n S1 = rbind(S1, as.matrix(S.1))\n C1 = rbind(C1, as.matrix(C.1))\n S1_new = rbind(S1_new, as.matrix(S.1_new))\n C1_new = rbind(C1_new, as.matrix(C.1_new))\n C_overall = rbind(C_overall, as.matrix(C.ov))\n S_overall = rbind(S_overall, as.matrix(S.ov))\n theta = rbind(theta, as.matrix(t))\n sigma.theta = rbind(sigma.theta, as.matrix(sig.t))\n if (condInd == TRUE & Gold_Std == FALSE & model == \n 1) {\n if (Gold_se == TRUE & Gold_sp == FALSE) {\n C.2 = read.table(file.Spec2)\n C.2 = C.2[(q + 1):total, ]\n C.2 = as.matrix(C.2)\n C.2 = C.2[thin, ]\n C2 = rbind(C2, as.matrix(C.2))\n }\n else {\n if (Gold_sp == TRUE & Gold_se == FALSE) {\n S.2 = read.table(file.Sens2)\n S.2 = S.2[(q + 1):total, ]\n S.2 = as.matrix(S.2)\n S.2 = S.2[thin, ]\n S2 = rbind(S2, as.matrix(S.2))\n }\n else {\n N2 = rs.length * iter.num\n t16 <- file(\"Sens2.txt\", \"rb\")\n t17 <- file(\"Spec2.txt\", \"rb\")\n Sens2_bin = readBin(t16, double(), n = N1, \n endian = \"little\")\n Spec2_bin = readBin(t17, double(), n = N1, \n endian = \"little\")\n S.2 = matrix(0, ncol = rs.length, nrow = iter.num)\n C.2 = matrix(0, ncol = rs.length, nrow = iter.num)\n for (i in 1:rs.length) {\n sequence = seq(i, iter.num * rs.length, \n rs.length)\n S.2[, i] = 
Sens2_bin[sequence]\n C.2[, i] = Spec2_bin[sequence]\n }\n close(t16)\n close(t17)\n S.2 = S.2[(q + 1):total, ]\n C.2 = C.2[(q + 1):total, ]\n S.2 = as.matrix(S.2)\n C.2 = as.matrix(C.2)\n S.2 = S.2[thin, ]\n C.2 = C.2[thin, ]\n S2 = rbind(S2, as.matrix(S.2))\n C2 = rbind(C2, as.matrix(C.2))\n }\n }\n }\n else {\n if (condInd == TRUE & Gold_Std == FALSE & model == \n 2) {\n a.1 = read.table(file.a1)\n a.0 = read.table(file.a0)\n S.2 = read.table(file.Sens2)\n C.2 = read.table(file.Spec2)\n a.1 = a.1[(q + 1):total, ]\n a.0 = a.0[(q + 1):total, ]\n S.2 = S.2[(q + 1):total, ]\n C.2 = C.2[(q + 1):total, ]\n a.1 = as.matrix(a.1)\n a.0 = as.matrix(a.0)\n a.1 = a.1[thin, ]\n a.0 = a.0[thin, ]\n S.2 = as.matrix(S.2)\n C.2 = as.matrix(C.2)\n S.2 = S.2[thin, ]\n C.2 = C.2[thin, ]\n a1 = rbind(a1, as.matrix(a.1))\n a0 = rbind(a0, as.matrix(a.0))\n S2 = rbind(S2, as.matrix(S.2))\n C2 = rbind(C2, as.matrix(C.2))\n }\n else {\n if (condInd == FALSE) {\n d.1 = read.table(file.d1)\n d.0 = read.table(file.d0)\n a.1 = read.table(file.a1)\n a.0 = read.table(file.a0)\n b.1 = read.table(file.b1)\n b.0 = read.table(file.b0)\n S.2 = read.table(file.Sens2)\n C.2 = read.table(file.Spec2)\n d.1 = d.1[(q + 1):total, ]\n d.0 = d.0[(q + 1):total, ]\n a.1 = a.1[(q + 1):total, ]\n a.0 = a.0[(q + 1):total, ]\n b.1 = b.1[(q + 1):total, ]\n b.0 = b.0[(q + 1):total, ]\n S.2 = S.2[(q + 1):total, ]\n C.2 = C.2[(q + 1):total, ]\n d.1 = as.matrix(d.1)\n d.0 = as.matrix(d.0)\n a.1 = as.matrix(a.1)\n a.0 = as.matrix(a.0)\n b.1 = as.matrix(b.1)\n b.0 = as.matrix(b.0)\n d.1 = d.1[thin, ]\n d.0 = d.0[thin, ]\n a.1 = a.1[thin, ]\n a.0 = a.0[thin, ]\n b.1 = b.1[thin, ]\n b.0 = b.0[thin, ]\n S.2 = as.matrix(S.2)\n C.2 = as.matrix(C.2)\n S.2 = S.2[thin, ]\n C.2 = C.2[thin, ]\n a1 = rbind(a1, as.matrix(a.1))\n a0 = rbind(a0, as.matrix(a.0))\n b1 = rbind(b1, as.matrix(b.1))\n b0 = rbind(b0, as.matrix(b.0))\n d1 = rbind(d1, as.matrix(d.1))\n d0 = rbind(d0, as.matrix(d.0))\n S2 = rbind(S2, as.matrix(S.2))\n C2 = rbind(C2, as.matrix(C.2))\n }\n }\n }\n }\n }\n }\n setwd(path)\n alpha = as.mcmc(alpha)\n THETA = as.mcmc(THETA)\n LAMBDA = as.mcmc(LAMBDA)\n beta = as.mcmc(beta)\n PI = as.mcmc(PI)\n sigma.alpha = as.mcmc(sigma.alpha)\n S1 = as.mcmc(S1)\n C1 = as.mcmc(C1)\n S1_new = as.mcmc(S1_new)\n C1_new = as.mcmc(C1_new)\n C_overall = as.mcmc(C_overall)\n S_overall = as.mcmc(S_overall)\n theta = as.mcmc(theta)\n sigma.theta = as.mcmc(sigma.theta)\n if (point_estimate == \"mean\") {\n mean_OR_med = 2\n }\n else {\n if (point_estimate == \"median\") {\n mean_OR_med = 3\n }\n }\n iter.size = length(beta)\n theta.est = apply(as.matrix(theta), 2, point_estimate)\n theta.HPD = HPDinterval(theta)\n theta.sd = sd(theta)\n sigma.theta.est = apply(as.matrix(sigma.theta), 2, point_estimate)\n sigma.theta.HPD = HPDinterval(sigma.theta)\n sigma.theta.sd = sd(sigma.theta)\n alpha.est = apply(as.matrix(alpha), 2, point_estimate)\n alpha.HPD = HPDinterval(alpha)\n alpha.sd = sd(alpha)\n THETA.est = apply(as.matrix(THETA), 2, point_estimate)\n THETA.HPD = HPDinterval(THETA)\n THETA.sd = sd(THETA)\n LAMBDA.est = apply(as.matrix(LAMBDA), 2, point_estimate)\n LAMBDA.HPD = HPDinterval(LAMBDA)\n LAMBDA.sd = sd(LAMBDA)\n beta.est = apply(as.matrix(beta), 2, point_estimate)\n beta.HPD = HPDinterval(beta)\n beta.sd = sd(beta)\n PI.est = apply(as.matrix(PI), 2, point_estimate)\n PI.HPD = HPDinterval(PI)\n PI.sd = sd(PI)\n sigma.alpha.est = apply(as.matrix(sigma.alpha), 2, point_estimate)\n sigma.alpha.HPD = HPDinterval(sigma.alpha)\n sigma.alpha.sd = sd(sigma.alpha)\n 
S1.est = apply(as.matrix(S1), 2, point_estimate)\n S1.HPD = HPDinterval(S1)\n S1.sd = sd(S1)\n C1.est = apply(as.matrix(C1), 2, point_estimate)\n C1.HPD = HPDinterval(C1)\n C1.sd = sd(C1)\n S1_new.est = apply(as.matrix(S1_new), 2, point_estimate)\n S1_new.HPD = HPDinterval(S1_new)\n S1_new.sd = sd(S1_new)\n C1_new.est = apply(as.matrix(C1_new), 2, point_estimate)\n C1_new.HPD = HPDinterval(C1_new)\n C1_new.sd = sd(C1_new)\n C_overall.est = apply(as.matrix(C_overall), 2, point_estimate)\n C_overall.HPD = HPDinterval(C_overall)\n C_overall.sd = sd(C_overall)\n S_overall.est = apply(as.matrix(S_overall), 2, point_estimate)\n S_overall.HPD = HPDinterval(S_overall)\n S_overall.sd = sd(S_overall)\n if (condInd == TRUE & Gold_Std == FALSE & model == 1) {\n if (Gold_se == TRUE & Gold_sp == FALSE) {\n C2 = as.mcmc(C2)\n C2.est = apply(as.matrix(C2), 2, point_estimate)\n C2.HPD = HPDinterval(C2)\n C2.sd = sd(C2)\n }\n else {\n if (Gold_sp == TRUE & Gold_se == FALSE) {\n S2 = as.mcmc(S2)\n S2.est = apply(as.matrix(S2), 2, point_estimate)\n S2.HPD = HPDinterval(S2)\n S2.sd = sd(S2)\n }\n else {\n S2 = as.mcmc(S2)\n C2 = as.mcmc(C2)\n S2.est = apply(as.matrix(S2), 2, point_estimate)\n S2.HPD = HPDinterval(S2)\n S2.sd = sd(S2)\n C2.est = apply(as.matrix(C2), 2, point_estimate)\n C2.HPD = HPDinterval(C2)\n C2.sd = sd(C2)\n }\n }\n }\n else {\n if (condInd == TRUE & Gold_Std == FALSE & model == 2) {\n a1 = as.mcmc(a1)\n a0 = as.mcmc(a0)\n S2 = as.mcmc(S2)\n C2 = as.mcmc(C2)\n a1.est = apply(as.matrix(a1), 2, point_estimate)\n a1.HPD = HPDinterval(a1)\n a1.sd = sd(a1)\n a0.est = apply(as.matrix(a0), 2, point_estimate)\n a0.HPD = HPDinterval(a0)\n a0.sd = sd(a0)\n S2.est = apply(as.matrix(S2), 2, point_estimate)\n S2.HPD = HPDinterval(S2)\n S2.sd = sd(S2)\n C2.est = apply(as.matrix(C2), 2, point_estimate)\n C2.HPD = HPDinterval(C2)\n C2.sd = sd(C2)\n }\n else {\n if (condInd == FALSE) {\n a1 = as.mcmc(a1)\n a0 = as.mcmc(a0)\n b1 = as.mcmc(b1)\n b0 = as.mcmc(b0)\n d1 = as.mcmc(d1)\n d0 = as.mcmc(d0)\n S2 = as.mcmc(S2)\n C2 = as.mcmc(C2)\n a1.est = apply(as.matrix(a1), 2, point_estimate)\n a1.HPD = HPDinterval(a1)\n a1.sd = sd(a1)\n a0.est = apply(as.matrix(a0), 2, point_estimate)\n a0.HPD = HPDinterval(a0)\n a0.sd = sd(a0)\n b1.est = apply(as.matrix(b1), 2, point_estimate)\n b1.HPD = HPDinterval(b1)\n b1.sd = sd(b1)\n b0.est = apply(as.matrix(b0), 2, point_estimate)\n b0.HPD = HPDinterval(b0)\n b0.sd = sd(b0)\n d1.est = apply(as.matrix(d1), 2, point_estimate)\n d1.HPD = HPDinterval(d1)\n d1.sd = sd(d1)\n d0.est = apply(as.matrix(d0), 2, point_estimate)\n d0.HPD = HPDinterval(d0)\n d0.sd = sd(d0)\n S2.est = apply(as.matrix(S2), 2, point_estimate)\n S2.HPD = HPDinterval(S2)\n S2.sd = sd(S2)\n C2.est = apply(as.matrix(C2), 2, point_estimate)\n C2.HPD = HPDinterval(C2)\n C2.sd = sd(C2)\n }\n }\n }\n batch = 50\n ssize = iter.size/batch\n moy.t = moy.a = moy.T = moy.L = moy.b = moy.p = moy.sig.a = moy.sig.t = moy.s2 = moy.c2 = moy.s1 = moy.c1 = moy.s1_new = moy.c1_new = moy.s.ov = moy.c.ov = moy.a1 = moy.a0 = moy.b1 = moy.b0 = moy.d1 = moy.d0 = numeric()\n if (N == 1) {\n for (i in 1:batch) {\n moy.a = c(moy.a, mean(alpha[round((1 + ssize * (i - \n 1)), 0):round((ssize * i), 0)])/ssize)\n moy.p = c(moy.p, mean(PI[round((1 + ssize * (i - \n 1)), 0):round((ssize * i), 0)])/ssize)\n moy.s1 = c(moy.s1, mean(as.matrix(S1[round((1 + ssize * \n (i - 1)), 0):round((ssize * i), 0)]))/ssize)\n moy.c1 = c(moy.c1, mean(as.matrix(C1[round((1 + ssize * \n (i - 1)), 0):round((ssize * i), 0)]))/ssize)\n moy.L = c(moy.L, 
mean(LAMBDA[round((1 + ssize * (i - \n 1)), 0):round((ssize * i), 0)]))\n moy.T = c(moy.T, mean(THETA[round((1 + ssize * (i - \n 1)), 0):round((ssize * i), 0)]))\n moy.b = c(moy.b, mean(beta[round((1 + ssize * (i - \n 1)), 0):round((ssize * i), 0)]))\n moy.sig.a = c(moy.sig.a, mean(sigma.alpha[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), 0)]))\n moy.s.ov = c(moy.s.ov, mean(S_overall[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), 0)]))\n moy.c.ov = c(moy.c.ov, mean(C_overall[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), 0)]))\n moy.s1_new = c(moy.s1_new, mean(S1_new[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), 0)]))\n moy.c1_new = c(moy.c1_new, mean(C1_new[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), 0)]))\n moy.t = c(moy.t, mean(theta[round((1 + ssize * (i - \n 1)), 0):round((ssize * i), 0)])/ssize)\n moy.sig.t = c(moy.sig.t, mean(sigma.theta[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), 0)]))\n }\n }\n else {\n for (i in 1:batch) {\n moy.a = cbind(moy.a, colSums(alpha[round((1 + ssize * \n (i - 1)), 0):round((ssize * i), 0), ])/ssize)\n moy.p = cbind(moy.p, colSums(PI[round((1 + ssize * \n (i - 1)), 0):round((ssize * i), 0), ])/ssize)\n moy.s1 = cbind(moy.s1, colSums(as.matrix(S1[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), 0), ]))/ssize)\n moy.c1 = cbind(moy.c1, colSums(as.matrix(C1[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), 0), ]))/ssize)\n moy.L = c(moy.L, mean(LAMBDA[round((1 + ssize * (i - \n 1)), 0):round((ssize * i), 0)]))\n moy.T = c(moy.T, mean(THETA[round((1 + ssize * (i - \n 1)), 0):round((ssize * i), 0)]))\n moy.b = c(moy.b, mean(beta[round((1 + ssize * (i - \n 1)), 0):round((ssize * i), 0)]))\n moy.sig.a = c(moy.sig.a, mean(sigma.alpha[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), 0)]))\n moy.s.ov = c(moy.s.ov, mean(S_overall[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), 0)]))\n moy.c.ov = c(moy.c.ov, mean(C_overall[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), 0)]))\n moy.s1_new = c(moy.s1_new, mean(S1_new[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), 0)]))\n moy.c1_new = c(moy.c1_new, mean(C1_new[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), 0)]))\n moy.t = cbind(moy.t, colSums(theta[round((1 + ssize * \n (i - 1)), 0):round((ssize * i), 0), ])/ssize)\n moy.sig.t = c(moy.sig.t, mean(sigma.theta[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), 0)]))\n }\n }\n if (N == 1) {\n alpha.MCerror = sqrt(sum((moy.a - mean(alpha)/iter.size)^2)/(batch - \n 1))/sqrt(batch)\n PI.MCerror = sqrt(sum((moy.p - mean(PI)/iter.size)^2)/(batch - \n 1))/sqrt(batch)\n S1.MCerror = sqrt(sum((moy.s1 - mean(as.matrix(S1))/iter.size)^2)/(batch - \n 1))/sqrt(batch)\n C1.MCerror = sqrt(sum((moy.c1 - mean(as.matrix(C1))/iter.size)^2)/(batch - \n 1))/sqrt(batch)\n theta.MCerror = sqrt(sum((moy.t - mean(theta)/iter.size)^2)/(batch - \n 1))/sqrt(batch)\n sigma.theta.MCerror = sqrt(sum((moy.sig.t - mean(sigma.theta))^2)/(batch - \n 1))/sqrt(batch)\n }\n else {\n alpha.MCerror = sqrt(rowSums((moy.a - colSums(alpha)/iter.size)^2)/(batch - \n 1))/sqrt(batch)\n PI.MCerror = sqrt(rowSums((moy.p - colSums(PI)/iter.size)^2)/(batch - \n 1))/sqrt(batch)\n S1.MCerror = sqrt(rowSums((moy.s1 - colSums(as.matrix(S1))/iter.size)^2)/(batch - \n 1))/sqrt(batch)\n C1.MCerror = sqrt(rowSums((moy.c1 - colSums(as.matrix(C1))/iter.size)^2)/(batch - \n 1))/sqrt(batch)\n theta.MCerror = sqrt(rowSums((moy.t - colSums(theta)/iter.size)^2)/(batch - \n 1))/sqrt(batch)\n sigma.theta.MCerror = sqrt(sum((moy.sig.t - 
mean(sigma.theta))^2)/(batch - \n 1))/sqrt(batch)\n }\n THETA.MCerror = sqrt(sum((moy.T - mean(THETA))^2)/(batch - \n 1))/sqrt(batch)\n LAMBDA.MCerror = sqrt(sum((moy.L - mean(LAMBDA))^2)/(batch - \n 1))/sqrt(batch)\n beta.MCerror = sqrt(sum((moy.b - mean(beta))^2)/(batch - \n 1))/sqrt(batch)\n sigma.alpha.MCerror = sqrt(sum((moy.sig.a - mean(sigma.alpha))^2)/(batch - \n 1))/sqrt(batch)\n S_overall.MCerror = sqrt(sum((moy.s.ov - mean(S_overall))^2)/(batch - \n 1))/sqrt(batch)\n C_overall.MCerror = sqrt(sum((moy.c.ov - mean(C_overall))^2)/(batch - \n 1))/sqrt(batch)\n S1_new.MCerror = sqrt(sum((moy.s1_new - mean(S1_new))^2)/(batch - \n 1))/sqrt(batch)\n C1_new.MCerror = sqrt(sum((moy.c1_new - mean(C1_new))^2)/(batch - \n 1))/sqrt(batch)\n if (condInd == TRUE & Gold_Std == FALSE & model == 1) {\n if (Gold_se == TRUE & Gold_sp == FALSE) {\n if (sub_rs[[1]] == 1) {\n for (i in 1:batch) {\n moy.c2 = c(moy.c2, mean(C2[round((1 + ssize * \n (i - 1)), 0):round((ssize * i), 0)]))\n }\n C2.MCerror = sqrt(sum((moy.c2 - mean(C2))^2)/(batch - \n 1))/sqrt(batch)\n }\n else {\n if (sub_rs[[1]] > 1) {\n for (i in 1:batch) {\n moy.c2 = cbind(moy.c2, colSums(as.matrix(C2[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), \n 0), ]))/ssize)\n }\n C2.MCerror = sqrt(rowSums((moy.c2 - colSums(as.matrix(C2))/iter.size)^2)/(batch - \n 1))/sqrt(batch)\n }\n }\n }\n else {\n if (Gold_sp == TRUE & Gold_se == FALSE) {\n if (sub_rs[[1]] == 1) {\n for (i in 1:batch) {\n moy.s2 = c(moy.s2, mean(S2[round((1 + ssize * \n (i - 1)), 0):round((ssize * i), 0)]))\n }\n S2.MCerror = sqrt(sum((moy.s2 - mean(S2))^2)/(batch - \n 1))/sqrt(batch)\n }\n else {\n if (sub_rs[[1]] > 1) {\n for (i in 1:batch) {\n moy.s2 = cbind(moy.s2, colSums(as.matrix(S2[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), \n 0), ]))/ssize)\n }\n S2.MCerror = sqrt(rowSums((moy.s2 - colSums(as.matrix(S2))/iter.size)^2)/(batch - \n 1))/sqrt(batch)\n }\n }\n }\n else {\n if (sub_rs[[1]] == 1) {\n for (i in 1:batch) {\n moy.s2 = c(moy.s2, mean(S2[round((1 + ssize * \n (i - 1)), 0):round((ssize * i), 0)]))\n moy.c2 = c(moy.c2, mean(C2[round((1 + ssize * \n (i - 1)), 0):round((ssize * i), 0)]))\n }\n S2.MCerror = sqrt(sum((moy.s2 - mean(S2))^2)/(batch - \n 1))/sqrt(batch)\n C2.MCerror = sqrt(sum((moy.c2 - mean(C2))^2)/(batch - \n 1))/sqrt(batch)\n }\n else {\n if (sub_rs[[1]] > 1) {\n for (i in 1:batch) {\n moy.s2 = cbind(moy.s2, colSums(as.matrix(S2[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), \n 0), ]))/ssize)\n moy.c2 = cbind(moy.c2, colSums(as.matrix(C2[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), \n 0), ]))/ssize)\n }\n S2.MCerror = sqrt(rowSums((moy.s2 - colSums(as.matrix(S2))/iter.size)^2)/(batch - \n 1))/sqrt(batch)\n C2.MCerror = sqrt(rowSums((moy.c2 - colSums(as.matrix(C2))/iter.size)^2)/(batch - \n 1))/sqrt(batch)\n }\n }\n }\n }\n }\n else {\n if (condInd == TRUE & Gold_Std == FALSE & model == 2) {\n if (sub_rs[[1]] == 1) {\n for (i in 1:batch) {\n moy.a1 = c(moy.a1, mean(a1[round((1 + ssize * \n (i - 1)), 0):round((ssize * i), 0)]))\n moy.a0 = c(moy.a0, mean(a0[round((1 + ssize * \n (i - 1)), 0):round((ssize * i), 0)]))\n moy.s2 = c(moy.s2, mean(S2[round((1 + ssize * \n (i - 1)), 0):round((ssize * i), 0)]))\n moy.c2 = c(moy.c2, mean(C2[round((1 + ssize * \n (i - 1)), 0):round((ssize * i), 0)]))\n }\n a1.MCerror = sqrt(sum((moy.a1 - mean(a1))^2)/(batch - \n 1))/sqrt(batch)\n a0.MCerror = sqrt(sum((moy.a0 - mean(a0))^2)/(batch - \n 1))/sqrt(batch)\n S2.MCerror = sqrt(sum((moy.s2 - mean(S2))^2)/(batch - \n 1))/sqrt(batch)\n 
C2.MCerror = sqrt(sum((moy.c2 - mean(C2))^2)/(batch - \n 1))/sqrt(batch)\n }\n else {\n if (sub_rs[[1]] > 1) {\n for (i in 1:batch) {\n moy.a1 = cbind(moy.a1, colSums(as.matrix(a1[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), \n 0), ]))/ssize)\n moy.a0 = cbind(moy.a0, colSums(as.matrix(a0[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), \n 0), ]))/ssize)\n moy.s2 = cbind(moy.s2, colSums(as.matrix(S2[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), \n 0), ]))/ssize)\n moy.c2 = cbind(moy.c2, colSums(as.matrix(C2[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), \n 0), ]))/ssize)\n }\n a1.MCerror = sqrt(rowSums((moy.a1 - colSums(as.matrix(a1))/iter.size)^2)/(batch - \n 1))/sqrt(batch)\n a0.MCerror = sqrt(rowSums((moy.a0 - colSums(as.matrix(a0))/iter.size)^2)/(batch - \n 1))/sqrt(batch)\n S2.MCerror = sqrt(rowSums((moy.s2 - colSums(as.matrix(S2))/iter.size)^2)/(batch - \n 1))/sqrt(batch)\n C2.MCerror = sqrt(rowSums((moy.c2 - colSums(as.matrix(C2))/iter.size)^2)/(batch - \n 1))/sqrt(batch)\n }\n }\n }\n else {\n if (condInd == FALSE) {\n if (sub_rs[[1]] == 1) {\n for (i in 1:batch) {\n moy.a1 = c(moy.a1, mean(a1[round((1 + ssize * \n (i - 1)), 0):round((ssize * i), 0)]))\n moy.a0 = c(moy.a0, mean(a0[round((1 + ssize * \n (i - 1)), 0):round((ssize * i), 0)]))\n moy.b1 = c(moy.b1, mean(b1[round((1 + ssize * \n (i - 1)), 0):round((ssize * i), 0)]))\n moy.b0 = c(moy.b0, mean(b0[round((1 + ssize * \n (i - 1)), 0):round((ssize * i), 0)]))\n moy.d1 = c(moy.d1, mean(d1[round((1 + ssize * \n (i - 1)), 0):round((ssize * i), 0)]))\n moy.d0 = c(moy.d0, mean(d0[round((1 + ssize * \n (i - 1)), 0):round((ssize * i), 0)]))\n moy.s2 = c(moy.s2, mean(S2[round((1 + ssize * \n (i - 1)), 0):round((ssize * i), 0)]))\n moy.c2 = c(moy.c2, mean(C2[round((1 + ssize * \n (i - 1)), 0):round((ssize * i), 0)]))\n }\n a1.MCerror = sqrt(sum((moy.a1 - mean(a1))^2)/(batch - \n 1))/sqrt(batch)\n a0.MCerror = sqrt(sum((moy.a0 - mean(a0))^2)/(batch - \n 1))/sqrt(batch)\n b1.MCerror = sqrt(sum((moy.b1 - mean(b1))^2)/(batch - \n 1))/sqrt(batch)\n b0.MCerror = sqrt(sum((moy.b0 - mean(b0))^2)/(batch - \n 1))/sqrt(batch)\n d1.MCerror = sqrt(sum((moy.d1 - mean(d1))^2)/(batch - \n 1))/sqrt(batch)\n d0.MCerror = sqrt(sum((moy.d0 - mean(d0))^2)/(batch - \n 1))/sqrt(batch)\n S2.MCerror = sqrt(sum((moy.s2 - mean(S2))^2)/(batch - \n 1))/sqrt(batch)\n C2.MCerror = sqrt(sum((moy.c2 - mean(C2))^2)/(batch - \n 1))/sqrt(batch)\n }\n else {\n if (sub_rs[[1]] > 1) {\n for (i in 1:batch) {\n moy.a1 = cbind(moy.a1, colSums(as.matrix(a1[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), \n 0), ]))/ssize)\n moy.a0 = cbind(moy.a0, colSums(as.matrix(a0[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), \n 0), ]))/ssize)\n moy.b1 = cbind(moy.b1, colSums(as.matrix(b1[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), \n 0), ]))/ssize)\n moy.b0 = cbind(moy.b0, colSums(as.matrix(b0[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), \n 0), ]))/ssize)\n moy.d1 = cbind(moy.d1, colSums(as.matrix(d1[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), \n 0), ]))/ssize)\n moy.d0 = cbind(moy.d0, colSums(as.matrix(d0[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), \n 0), ]))/ssize)\n moy.s2 = cbind(moy.s2, colSums(as.matrix(S2[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), \n 0), ]))/ssize)\n moy.c2 = cbind(moy.c2, colSums(as.matrix(C2[round((1 + \n ssize * (i - 1)), 0):round((ssize * i), \n 0), ]))/ssize)\n }\n a1.MCerror = sqrt(rowSums((moy.a1 - colSums(as.matrix(a1))/iter.size)^2)/(batch - \n 1))/sqrt(batch)\n 
a0.MCerror = sqrt(rowSums((moy.a0 - colSums(as.matrix(a0))/iter.size)^2)/(batch - \n 1))/sqrt(batch)\n b1.MCerror = sqrt(rowSums((moy.b1 - colSums(as.matrix(b1))/iter.size)^2)/(batch - \n 1))/sqrt(batch)\n b0.MCerror = sqrt(rowSums((moy.b0 - colSums(as.matrix(b0))/iter.size)^2)/(batch - \n 1))/sqrt(batch)\n d1.MCerror = sqrt(rowSums((moy.d1 - colSums(as.matrix(d1))/iter.size)^2)/(batch - \n 1))/sqrt(batch)\n d0.MCerror = sqrt(rowSums((moy.d0 - colSums(as.matrix(d0))/iter.size)^2)/(batch - \n 1))/sqrt(batch)\n S2.MCerror = sqrt(rowSums((moy.s2 - colSums(as.matrix(S2))/iter.size)^2)/(batch - \n 1))/sqrt(batch)\n C2.MCerror = sqrt(rowSums((moy.c2 - colSums(as.matrix(C2))/iter.size)^2)/(batch - \n 1))/sqrt(batch)\n }\n }\n }\n }\n }\n data = list(data)\n if (real_life == FALSE) {\n pp = data[[1]][, 1]\n pn = data[[1]][, 2]\n np = data[[1]][, 3]\n nn = data[[1]][, 4]\n Sample.size = pp + pn + np + nn\n true.alpha = tv[[1]][, 1]\n true.theta = tv[[1]][, 2]\n true.S1 = tv[[1]][, 3]\n true.C1 = tv[[1]][, 4]\n true.PI = tv[[1]][, 5]\n true.THETA = tv[[2]][1]\n true.sigma.theta = tv[[2]][2]\n true.LAMBDA = tv[[2]][3]\n true.sigma.alpha = tv[[2]][4]\n true.beta = tv[[2]][5]\n if (Gold_Std == TRUE) {\n true.S_overall = tv[[2]][6]\n true.C_overall = tv[[2]][7]\n }\n else {\n if (condInd == TRUE & Gold_Std == FALSE & model == \n 1) {\n true.S2 = tv[[3]][1, ]\n true.C2 = tv[[3]][2, ]\n true.S_overall = tv[[2]][6]\n true.C_overall = tv[[2]][7]\n }\n else {\n if (condInd == TRUE & Gold_Std == FALSE & model == \n 2) {\n true.a1 = tv[[3]][3, ]\n true.a0 = tv[[3]][4, ]\n true.S2 = tv[[3]][1, ]\n true.C2 = tv[[3]][2, ]\n true.S_overall = tv[[2]][8]\n true.C_overall = tv[[2]][9]\n }\n else {\n if (condInd == FALSE) {\n true.S2 = tv[[3]][1, ]\n true.C2 = tv[[3]][2, ]\n true.a1 = tv[[3]][3, ]\n true.a0 = tv[[3]][4, ]\n true.b1 = tv[[3]][5, ]\n true.b0 = tv[[3]][6, ]\n true.d1 = tv[[2]][6]\n true.d0 = tv[[2]][7]\n true.S_overall = tv[[2]][8]\n true.C_overall = tv[[2]][9]\n }\n }\n }\n }\n test.file = paste(\"Summary for N =\", round((iter.num * \n nb_chains - (burn_in) * nb_chains)/Thin, 0), \".txt\")\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"Number of chains =\", nb_chains), file = test.file, \n append = TRUE)\n write(paste(\"Number of iteration within a chain =\", iter.num, \n \" Burn in within each chain =\", burn_in), file = test.file, \n append = TRUE)\n write(paste(\"Thinning interval =\", Thin), file = test.file, \n append = TRUE)\n write(paste(\"Total number of iteration kept =\", round((iter.num * \n nb_chains - (burn_in) * nb_chains)/Thin, 0)), file = test.file, \n append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"File location : \", path), file = test.file, \n append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Date :\", Sys.time()), file = test.file, \n append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n if (Gold_Std == TRUE) {\n write(\"Perfect reference standard\", file = test.file, \n append = TRUE)\n }\n else {\n write(\"Imperfect reference standard\", file = test.file, \n append = TRUE)\n }\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\tSAMPLE SIZE \\t \"), file = test.file, append = 
TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\" Total ++ +- -+ --\"), file = test.file, \n append = TRUE)\n for (i in 1:N) {\n write(paste(\"Study \", i, \"\", Sample.size[i], \"\", \n pp[i], \"\", pn[i], \"\", np[i], \"\", nn[i]), file = test.file, \n append = TRUE)\n }\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\tPRIOR INFORMATION \\t \"), file = test.file, \n append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Prior of prevalence (pi) is \", prior_dist_PI, \n \"(\", round(alpha.PI, digits = 4), \",\", round(beta.PI, \n digits = 4), \"), <=> pi in [\", low.pi, \",\", up.pi, \n \"]\"), file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Prior of beta is Uniform(\", round(beta.a, \n 4), \",\", round(beta.b, 4), \")\"), file = test.file, \n append = TRUE)\n write(paste(\"Prior of THETA is Uniform(\", prior.THETA.lower, \n \",\", prior.THETA.upper, \")\"), file = test.file, append = TRUE)\n write(paste(\"Prior of LAMBDA is Uniform(\", prior.LAMBDA.lower, \n \",\", prior.LAMBDA.upper, \")\"), file = test.file, \n append = TRUE)\n if (prior_sig_a == 1) {\n write(paste(\"Prior of sigma_alpha is uniform(\", l.disp.alpha, \n \",\", u.disp.alpha, \")\"), file = test.file, append = TRUE)\n }\n else {\n if (prior_sig_a == 2) {\n write(paste(\"Prior of sigma_alpha^2 is uniform(\", \n l.disp.alpha, \",\", u.disp.alpha, \")\"), file = test.file, \n append = TRUE)\n }\n else {\n if (prior_sig_a == 3) {\n write(paste(\"Prior of precision of sigma_alpha is gamma(\", \n l.disp.alpha, \",\", u.disp.alpha, \")\"), file = test.file, \n append = TRUE)\n }\n }\n }\n if (prior_sig_t == 1) {\n write(paste(\"Prior of sigma_theta is uniform(\", l.disp.theta, \n \",\", u.disp.theta, \")\"), file = test.file, append = TRUE)\n }\n else {\n if (prior_sig_t == 2) {\n write(paste(\"Prior of sigma_theta^2 is uniform(\", \n l.disp.theta, \",\", u.disp.theta, \")\"), file = test.file, \n append = TRUE)\n }\n else {\n if (prior_sig_t == 3) {\n write(paste(\"Prior of precision of sigma_theta is gamma(\", \n l.disp.theta, \",\", u.disp.theta, \")\"), file = test.file, \n append = TRUE)\n }\n }\n }\n if (condInd == TRUE & Gold_Std == FALSE & model == 1) {\n write(paste(\"\"), file = test.file, append = TRUE)\n if (Gold_se == TRUE & Gold_sp == FALSE) {\n write(paste(\"Prior of S2 (Sensitivity of reference test) is \"), \n file = test.file, append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"Study(ies) \", sub_rs[[i + 1]][1], \n \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"assumed to be perfect.\"), file = test.file, \n append = TRUE)\n }\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Prior of C2 (Specificity of reference test) is \"), \n file = test.file, append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"Study(ies) \", sub_rs[[i + 1]][1], \n \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", prior_dist_C2, \"(\", round(Spec2.alpha[i], \n digits = 4), \",\", round(Spec2.beta[i], \n digits = 4), \"), <=> 
C2 in [\", low.sp[i], \n \",\", up.sp[i], \"]\"), file = test.file, append = TRUE)\n }\n }\n else {\n if (Gold_sp == TRUE & Gold_se == FALSE) {\n write(paste(\"Prior of S2 (Sensitivity of reference test) is \"), \n file = test.file, append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"Study(ies) \", sub_rs[[i + 1]][1], \n \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", prior_dist_S2, \"(\", round(Sens2.alpha[i], \n digits = 4), \",\", round(Sens2.beta[i], \n digits = 4), \"), <=> S2 in [\", low.se[i], \n \",\", up.se[i], \"]\"), file = test.file, \n append = TRUE)\n }\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Prior of C2 (Specificity of reference test) is \"), \n file = test.file, append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"Study(ies) \", sub_rs[[i + 1]][1], \n \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"assumed to be perfect.\"), file = test.file, \n append = TRUE)\n }\n }\n else {\n write(paste(\"Prior of S2 (Sensitivity of reference test) is \"), \n file = test.file, append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"Study(ies) \", sub_rs[[i + 1]][1], \n \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", prior_dist_S2, \"(\", round(Sens2.alpha[i], \n digits = 4), \",\", round(Sens2.beta[i], \n digits = 4), \"), <=> S2 in [\", low.se[i], \n \",\", up.se[i], \"]\"), file = test.file, \n append = TRUE)\n }\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Prior of C2 (Specificity of reference test) is \"), \n file = test.file, append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"Study(ies) \", sub_rs[[i + 1]][1], \n \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", prior_dist_C2, \"(\", round(Spec2.alpha[i], \n digits = 4), \",\", round(Spec2.beta[i], \n digits = 4), \"), <=> C2 in [\", low.sp[i], \n \",\", up.sp[i], \"]\"), file = test.file, \n append = TRUE)\n }\n }\n }\n }\n else {\n if (condInd == TRUE & Gold_Std == FALSE & model == \n 2) {\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Prior of a1 is \"), file = test.file, \n append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"Study(ies) \", sub_rs[[i + 1]][1], \n \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"Normal (\", round(mean.a1[i], digits = 4), \n \",\", round(sd.a1[i], digits = 4), \"), <=> S2 in []\"), \n file = test.file, append = TRUE)\n }\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Prior of a0 is \"), file = test.file, \n append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"Study(ies) \", sub_rs[[i + 1]][1], \n \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"Normal (\", round(mean.a0[i], digits = 4), \n \",\", round(sd.a0[i], digits = 4), \"), <=> C2 in []\"), \n file = test.file, append = TRUE)\n }\n }\n else {\n if (condInd == FALSE) {\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Prior of a1 is \"), file = test.file, \n append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"Study(ies) \", sub_rs[[i + 1]][1], \n \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"Normal (\", round(mean.a1[i], \n digits = 4), \",\", round(sd.a1[i], digits = 4), \n \"), <=> S2 in []\"), file = test.file, append = TRUE)\n }\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Prior of a0 is \"), file = test.file, \n append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"Study(ies) \", sub_rs[[i + 1]][1], \n \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"Normal (\", round(mean.a0[i], \n digits 
= 4), \",\", round(sd.a0[i], digits = 4), \n \"), <=> C2 in []\"), file = test.file, append = TRUE)\n }\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Prior of b1 is \"), file = test.file, \n append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"Study(ies) \", sub_rs[[i + 1]][1], \n \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"Uniform (\", round(low.b1[i], \n digits = 4), \",\", round(up.b1[i], digits = 4), \n \"), <=> S2 in []\"), file = test.file, append = TRUE)\n }\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Prior of b0 is \"), file = test.file, \n append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"Study(ies) \", sub_rs[[i + 1]][1], \n \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"Uniform (\", round(low.b0[i], \n digits = 4), \",\", round(up.b0[i], digits = 4), \n \"), <=> C2 in []\"), file = test.file, append = TRUE)\n }\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Prior of d1 is Uniform(\", round(low.d1, \n 4), \",\", round(up.d1, 4), \")\"), file = test.file, \n append = TRUE)\n write(paste(\"Prior of d0 is Uniform(\", round(low.d0, \n 4), \",\", round(up.d0, 4), \")\"), file = test.file, \n append = TRUE)\n }\n }\n }\n write(paste(), file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\tBETWEEN-STUDY parameters (Point estimate =\", \n point_estimate, \")\\t \"), file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\" True_value Estimate Standard_dev MC_error C.I._lower C.I._upper\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"THETA \", round(true.THETA, digits = digit), \n \"\", round(THETA.est, digits = digit), \"\", round(THETA.sd, \n digits = digit), \"\", round(THETA.MCerror, digits = digit), \n \"\", round(THETA.HPD[1], digits = digit), \"\", round(THETA.HPD[2], \n digits = digit)), file = test.file, append = TRUE)\n write(paste(\"LAMBDA \", round(true.LAMBDA, digits = digit), \n \"\", round(LAMBDA.est, digits = digit), \"\", round(LAMBDA.sd, \n digits = digit), \"\", round(LAMBDA.MCerror, digits = digit), \n \"\", round(LAMBDA.HPD[1], digits = digit), \"\", round(LAMBDA.HPD[2], \n digits = digit)), file = test.file, append = TRUE)\n write(paste(\"beta \", round(true.beta, digits = digit), \n \"\", round(beta.est, digits = digit), \"\", round(beta.sd, \n digits = digit), \"\", round(beta.MCerror, digits = digit), \n \"\", round(beta.HPD[1], digits = digit), \"\", round(beta.HPD[2], \n digits = digit)), file = test.file, append = TRUE)\n write(paste(\"sigma.alpha \", round(true.sigma.alpha, digits = digit), \n \"\", round(sigma.alpha.est, digits = digit), \"\", round(sigma.alpha.sd, \n digits = digit), \"\", round(sigma.alpha.MCerror, \n digits = digit), \"\", round(sigma.alpha.HPD[1], \n digits = digit), \"\", round(sigma.alpha.HPD[2], \n digits = digit)), file = test.file, append = TRUE)\n write(paste(\"sigma.theta \", round(true.sigma.theta, digits = digit), \n \"\", round(sigma.theta.est, digits = digit), \"\", 
round(sigma.theta.sd, \n digits = digit), \"\", round(sigma.theta.MCerror, \n digits = digit), \"\", round(sigma.theta.HPD[1], \n digits = digit), \"\", round(sigma.theta.HPD[2], \n digits = digit)), file = test.file, append = TRUE)\n write(paste(\"Sensitivity (overall)\", round(true.S_overall, digits = digit), \n \"\", round(S_overall.est, digits = digit), \"\", round(S_overall.sd, \n digits = digit), \"\", round(S_overall.MCerror, \n digits = digit), \"\", round(S_overall.HPD[1], \n digits = digit), \"\", round(S_overall.HPD[2], \n digits = digit)), file = test.file, append = TRUE)\n write(paste(\"Specificity (overall)\", round(true.C_overall, digits = digit), \n \"\", round(C_overall.est, digits = digit), \"\", round(C_overall.sd, \n digits = digit), \"\", round(C_overall.MCerror, \n digits = digit), \"\", round(C_overall.HPD[1], \n digits = digit), \"\", round(C_overall.HPD[2], \n digits = digit)), file = test.file, append = TRUE)\n if (condInd == TRUE & Gold_Std == FALSE & model == 1) {\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\tReference standard (Point estimate =\", \n point_estimate, \")\\t \"), file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\" True_value Estimate Standard_dev MC_error C.I._lower C.I._upper\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n if (Gold_se == TRUE & Gold_sp == FALSE) {\n if (rs.length != 1) {\n for (i in 1:rs.length) {\n write(paste(\"S2 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(true.S2[i], digits = digit), \n \"(It was assumed to be perfect)\"), file = test.file, \n append = TRUE)\n write(paste(\"C2 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(true.C2[i], digits = digit), \n \"\", round(C2.est[i], digits = digit), \"\", \n round(C2.sd[i], digits = digit), \"\", round(C2.MCerror[i], \n digits = digit), \"\", round(C2.HPD[i, \n 1], digits = digit), \"\", round(C2.HPD[i, \n 2], digits = digit)), file = test.file, \n append = TRUE)\n }\n }\n else {\n write(paste(\"S2 \", round(true.S2, digits = digit), \n \"(It was assumed to be perfect)\"), file = test.file, \n append = TRUE)\n write(paste(\"C2 \", round(true.C2, digits = digit), \n \"\", round(C2.est, digits = digit), \"\", round(C2.sd, \n digits = digit), \"\", round(C2.MCerror, \n digits = digit), \"\", round(C2.HPD[1], digits = digit), \n \"\", round(C2.HPD[2], digits = digit)), file = test.file, \n append = TRUE)\n }\n }\n else {\n if (Gold_sp == TRUE & Gold_se == FALSE) {\n if (rs.length != 1) {\n for (i in 1:rs.length) {\n write(paste(\"S2 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(true.S2[i], digits = digit), \n \"\", round(S2.est[i], digits = digit), \n \"\", round(S2.sd[i], digits = digit), \n \"\", round(S2.MCerror[i], digits = digit), \n \"\", round(S2.HPD[i, 1], digits = digit), \n \"\", round(S2.HPD[i, 2], digits = digit)), \n file = test.file, append = TRUE)\n write(paste(\"C2 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(true.C2[i], digits = digit), \n \"(It was assumed to be perfect)\"), file = 
test.file, \n append = TRUE)\n }\n }\n else {\n write(paste(\"S2 \", round(true.S2, digits = digit), \n \"\", round(S2.est, digits = digit), \"\", \n round(S2.sd, digits = digit), \"\", round(S2.MCerror, \n digits = digit), \"\", round(S2.HPD[1], \n digits = digit), \"\", round(S2.HPD[2], \n digits = digit)), file = test.file, append = TRUE)\n write(paste(\"C2 \", round(true.C2, digits = digit), \n \"(It was assumed to be perfect)\"), file = test.file, \n append = TRUE)\n }\n }\n else {\n if (rs.length != 1) {\n for (i in 1:rs.length) {\n write(paste(\"S2 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(true.S2[i], digits = digit), \n \"\", round(S2.est[i], digits = digit), \n \"\", round(S2.sd[i], digits = digit), \n \"\", round(S2.MCerror[i], digits = digit), \n \"\", round(S2.HPD[i, 1], digits = digit), \n \"\", round(S2.HPD[i, 2], digits = digit)), \n file = test.file, append = TRUE)\n }\n for (i in 1:rs.length) {\n write(paste(\"C2 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(true.C2[i], digits = digit), \n \"\", round(C2.est[i], digits = digit), \n \"\", round(C2.sd[i], digits = digit), \n \"\", round(C2.MCerror[i], digits = digit), \n \"\", round(C2.HPD[i, 1], digits = digit), \n \"\", round(C2.HPD[i, 2], digits = digit)), \n file = test.file, append = TRUE)\n }\n }\n else {\n write(paste(\"S2 \", round(true.S2, digits = digit), \n \"\", round(S2.est, digits = digit), \"\", \n round(S2.sd, digits = digit), \"\", round(S2.MCerror, \n digits = digit), \"\", round(S2.HPD[1], \n digits = digit), \"\", round(S2.HPD[2], \n digits = digit)), file = test.file, append = TRUE)\n write(paste(\"C2 \", round(true.C2, digits = digit), \n \"\", round(C2.est, digits = digit), \"\", \n round(C2.sd, digits = digit), \"\", round(C2.MCerror, \n digits = digit), \"\", round(C2.HPD[1], \n digits = digit), \"\", round(C2.HPD[2], \n digits = digit)), file = test.file, append = TRUE)\n }\n }\n }\n }\n else {\n if (condInd == TRUE & Gold_Std == FALSE & model == \n 2) {\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\tReference standard (Point estimate =\", \n point_estimate, \")\\t \"), file = test.file, \n append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\" True_value Estimate Standard_dev MC_error C.I._lower C.I._upper\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n if (rs.length != 1) {\n for (i in 1:rs.length) {\n write(paste(\"a1 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(true.a1[i], digits = digit), \n \"\", round(a1.est[i], digits = digit), \"\", \n round(a1.sd[i], digits = digit), \"\", round(a1.MCerror[i], \n digits = digit), \"\", round(a1.HPD[i, \n 1], digits = digit), \"\", round(a1.HPD[i, \n 2], digits = digit)), file = test.file, \n append = TRUE)\n }\n for (i in 1:rs.length) {\n write(paste(\"a0 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(true.a0[i], digits = digit), \n \"\", round(a0.est[i], digits = digit), \"\", \n round(a0.sd[i], digits = digit), \"\", round(a0.MCerror[i], \n digits = digit), \"\", round(a0.HPD[i, \n 1], digits = 
digit), \"\", round(a0.HPD[i, \n 2], digits = digit)), file = test.file, \n append = TRUE)\n }\n write(paste(\"\"), file = test.file, append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"S2 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(true.S2[i], digits = digit), \n \"\", round(S2.est[i], digits = digit), \"\", \n round(S2.sd[i], digits = digit), \"\", round(S2.MCerror[i], \n digits = digit), \"\", round(S2.HPD[i, \n 1], digits = digit), \"\", round(S2.HPD[i, \n 2], digits = digit)), file = test.file, \n append = TRUE)\n }\n for (i in 1:rs.length) {\n write(paste(\"C2 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(true.C2[i], digits = digit), \n \"\", round(C2.est[i], digits = digit), \"\", \n round(C2.sd[i], digits = digit), \"\", round(C2.MCerror[i], \n digits = digit), \"\", round(C2.HPD[i, \n 1], digits = digit), \"\", round(C2.HPD[i, \n 2], digits = digit)), file = test.file, \n append = TRUE)\n }\n }\n else {\n write(paste(\"a1 \", round(true.a1, digits = digit), \n \"\", round(a1.est, digits = digit), \"\", round(a1.sd, \n digits = digit), \"\", round(a1.MCerror, \n digits = digit), \"\", round(a1.HPD[1], digits = digit), \n \"\", round(a1.HPD[2], digits = digit)), file = test.file, \n append = TRUE)\n write(paste(\"a0 \", round(true.a0, digits = digit), \n \"\", round(a0.est, digits = digit), \"\", round(a0.sd, \n digits = digit), \"\", round(a0.MCerror, \n digits = digit), \"\", round(a0.HPD[1], digits = digit), \n \"\", round(a0.HPD[2], digits = digit)), file = test.file, \n append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"S2 \", round(true.S2, digits = digit), \n \"\", round(S2.est, digits = digit), \"\", round(S2.sd, \n digits = digit), \"\", round(S2.MCerror, \n digits = digit), \"\", round(S2.HPD[1], digits = digit), \n \"\", round(S2.HPD[2], digits = digit)), file = test.file, \n append = TRUE)\n write(paste(\"C2 \", round(true.C2, digits = digit), \n \"\", round(C2.est, digits = digit), \"\", round(C2.sd, \n digits = digit), \"\", round(C2.MCerror, \n digits = digit), \"\", round(C2.HPD[1], digits = digit), \n \"\", round(C2.HPD[2], digits = digit)), file = test.file, \n append = TRUE)\n }\n }\n else {\n if (condInd == FALSE) {\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\tReference standard (Point estimate =\", \n point_estimate, \")\\t \"), file = test.file, \n append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\" True_value Estimate Standard_dev MC_error C.I._lower C.I._upper\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"d1 \", round(true.d1, digits = digit), \n \"\", round(d1.est, digits = digit), \"\", round(d1.sd, \n digits = digit), \"\", round(d1.MCerror, \n digits = digit), \"\", round(d1.HPD[1], digits = digit), \n \"\", round(d1.HPD[2], digits = digit)), file = test.file, \n append = TRUE)\n write(paste(\"d0 \", round(true.d0, digits = digit), \n \"\", round(d0.est, digits = digit), \"\", round(d0.sd, \n digits = digit), \"\", round(d0.MCerror, \n digits = digit), \"\", round(d0.HPD[1], digits = digit), \n \"\", round(d0.HPD[2], digits = digit)), file = test.file, \n 
append = TRUE)\n if (rs.length != 1) {\n for (i in 1:rs.length) {\n write(paste(\"a1 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(true.a1[i], digits = digit), \n \"\", round(a1.est[i], digits = digit), \n \"\", round(a1.sd[i], digits = digit), \n \"\", round(a1.MCerror[i], digits = digit), \n \"\", round(a1.HPD[i, 1], digits = digit), \n \"\", round(a1.HPD[i, 2], digits = digit)), \n file = test.file, append = TRUE)\n }\n for (i in 1:rs.length) {\n write(paste(\"a0 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(true.a0[i], digits = digit), \n \"\", round(a0.est[i], digits = digit), \n \"\", round(a0.sd[i], digits = digit), \n \"\", round(a0.MCerror[i], digits = digit), \n \"\", round(a0.HPD[i, 1], digits = digit), \n \"\", round(a0.HPD[i, 2], digits = digit)), \n file = test.file, append = TRUE)\n }\n write(paste(\"\"), file = test.file, append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"S2 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(true.S2[i], digits = digit), \n \"\", round(S2.est[i], digits = digit), \n \"\", round(S2.sd[i], digits = digit), \n \"\", round(S2.MCerror[i], digits = digit), \n \"\", round(S2.HPD[i, 1], digits = digit), \n \"\", round(S2.HPD[i, 2], digits = digit)), \n file = test.file, append = TRUE)\n }\n for (i in 1:rs.length) {\n write(paste(\"C2 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(true.C2[i], digits = digit), \n \"\", round(C2.est[i], digits = digit), \n \"\", round(C2.sd[i], digits = digit), \n \"\", round(C2.MCerror[i], digits = digit), \n \"\", round(C2.HPD[i, 1], digits = digit), \n \"\", round(C2.HPD[i, 2], digits = digit)), \n file = test.file, append = TRUE)\n }\n for (i in 1:rs.length) {\n write(paste(\"b1 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(true.b1[i], digits = digit), \n \"\", round(b1.est[i], digits = digit), \n \"\", round(b1.sd[i], digits = digit), \n \"\", round(b1.MCerror[i], digits = digit), \n \"\", round(b1.HPD[i, 1], digits = digit), \n \"\", round(b1.HPD[i, 2], digits = digit)), \n file = test.file, append = TRUE)\n }\n for (i in 1:rs.length) {\n write(paste(\"b0 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(true.b0[i], digits = digit), \n \"\", round(b0.est[i], digits = digit), \n \"\", round(b0.sd[i], digits = digit), \n \"\", round(b0.MCerror[i], digits = digit), \n \"\", round(b0.HPD[i, 1], digits = digit), \n \"\", round(b0.HPD[i, 2], digits = digit)), \n file = test.file, append = TRUE)\n }\n }\n else {\n write(paste(\"a1 \", round(true.a1, digits = digit), \n \"\", round(a1.est, digits = digit), \"\", \n round(a1.sd, digits = digit), \"\", round(a1.MCerror, \n digits = digit), \"\", round(a1.HPD[1], \n digits = digit), \"\", round(a1.HPD[2], \n digits = digit)), file = test.file, append = TRUE)\n write(paste(\"a0 \", round(true.a0, digits = digit), \n \"\", round(a0.est, digits = digit), \"\", \n round(a0.sd, digits = digit), \"\", round(a0.MCerror, \n digits = digit), \"\", round(a0.HPD[1], \n digits = digit), \"\", round(a0.HPD[2], \n digits = digit)), file = test.file, append = TRUE)\n write(paste(\"b1 \", round(true.b1, digits = digit), \n \"\", round(b1.est, digits = digit), \"\", \n round(b1.sd, digits = digit), \"\", round(b1.MCerror, \n digits = 
digit), \"\", round(b1.HPD[1], \n digits = digit), \"\", round(b1.HPD[2], \n digits = digit)), file = test.file, append = TRUE)\n write(paste(\"b0 \", round(true.b0, digits = digit), \n \"\", round(b0.est, digits = digit), \"\", \n round(b0.sd, digits = digit), \"\", round(b0.MCerror, \n digits = digit), \"\", round(b0.HPD[1], \n digits = digit), \"\", round(b0.HPD[2], \n digits = digit)), file = test.file, append = TRUE)\n write(paste(\"S2 \", round(true.S2, digits = digit), \n \"\", round(S2.est, digits = digit), \"\", \n round(S2.sd, digits = digit), \"\", round(S2.MCerror, \n digits = digit), \"\", round(S2.HPD[1], \n digits = digit), \"\", round(S2.HPD[2], \n digits = digit)), file = test.file, append = TRUE)\n write(paste(\"C2 \", round(true.C2, digits = digit), \n \"\", round(C2.est, digits = digit), \"\", \n round(C2.sd, digits = digit), \"\", round(C2.MCerror, \n digits = digit), \"\", round(C2.HPD[1], \n digits = digit), \"\", round(C2.HPD[2], \n digits = digit)), file = test.file, append = TRUE)\n }\n }\n }\n }\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\tWITHIN-STUDY PARAMETERS \\t \"), file = test.file, \n append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\ttheta \\t \"), file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\" True_value Estimate Standard_dev MC_error C.I._lower C.I._upper\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n for (i in 1:N) {\n write(paste(\"Study \", i, \"\", round(true.theta[i], \n digits = digit), \"\", round(theta.est[i], digits = digit), \n \"\", round(theta.sd[i], digits = digit), \"\", round(theta.MCerror[i], \n digits = digit), \"\", round(theta.HPD[i, 1], \n digits = digit), \"\", round(theta.HPD[i, 2], \n digits = digit)), file = test.file, append = TRUE)\n }\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\talpha \\t \"), file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\" True_value Estimate Standard_Dev MC_error C.I._lower C.I._upper\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n for (i in 1:N) {\n write(paste(\"Study \", i, \"\", round(true.alpha[i], \n digits = digit), \"\", round(alpha.est[i], digits = digit), \n \"\", round(alpha.sd[i], digits = digit), \"\", round(alpha.MCerror[i], \n digits = digit), \"\", round(alpha.HPD[i, 1], \n digits = digit), \"\", round(alpha.HPD[i, 2], \n digits = digit)), file = test.file, append = TRUE)\n }\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\tPrevalence \\t \"), file = 
test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\" True_value Estimate Standard_dev MC_error C.I._lower C.I._upper\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n for (i in 1:N) {\n write(paste(\"Study \", i, \"\", round(true.PI[i], digits = digit), \n \"\", round(PI.est[i], digits = digit), \"\", round(PI.sd[i], \n digits = digit), \"\", round(PI.MCerror[i], digits = digit), \n \"\", round(PI.HPD[i, 1], digits = digit), \"\", \n round(PI.HPD[i, 2], digits = digit)), file = test.file, \n append = TRUE)\n }\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\tSensitivity of test 1 (S1) \\t \"), file = test.file, \n append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\" True_value Estimate Standard_dev MC_error C.I._lower C.I._upper\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n for (i in 1:N) {\n write(paste(\"Study \", i, \"\", round(true.S1[i], digits = digit), \n \"\", round(S1.est[i], digits = digit), \"\", round(S1.sd[i], \n digits = digit), \"\", round(S1.MCerror[i], digits = digit), \n \"\", round(S1.HPD[i, 1], digits = digit), \"\", \n round(S1.HPD[i, 2], digits = digit)), file = test.file, \n append = TRUE)\n }\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\tSpecificity of test 1 (C1) \\t \"), file = test.file, \n append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\" True_value Estimate Standard_dev MC_error C.I._lower C.I._upper\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n for (i in 1:N) {\n write(paste(\"Study \", i, \"\", round(true.C1[i], digits = digit), \n \"\", round(C1.est[i], digits = digit), \"\", round(C1.sd[i], \n digits = digit), \"\", round(C1.MCerror[i], digits = digit), \n \"\", round(C1.HPD[i, 1], digits = digit), \"\", \n round(C1.HPD[i, 2], digits = digit)), file = test.file, \n append = TRUE)\n }\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\tPosterior predictive value of Sensitivity of test under evaluation (S1) \\t \"), \n file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\" True_value Estimate Standard_dev MC_error C.I._lower C.I._upper\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Sensitivity ------ \", round(S1_new.est, \n digits = digit), \"\", round(S1_new.sd, digits = digit), \n \"\", round(S1_new.MCerror, digits = digit), \"\", round(S1_new.HPD[1], \n digits = digit), \"\", round(S1_new.HPD[2], digits = digit)), \n file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\tPosterior predictive value of Specificity 
of test under evaluation (C1) \\t \"), \n file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\" True_value Estimate Standard_dev MC_error C.I._lower C.I._upper\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Specificity ------ \", round(C1_new.est, \n digits = digit), \"\", round(C1_new.sd, digits = digit), \n \"\", round(C1_new.MCerror, digits = digit), \"\", round(C1_new.HPD[1], \n digits = digit), \"\", round(C1_new.HPD[2], digits = digit)), \n file = test.file, append = TRUE)\n Num_study = c()\n for (i in 1:N) {\n Num_study = c(Num_study, paste(\"Study\", i))\n }\n\n\n if (condInd == TRUE & Gold_Std == FALSE & model == 1) {\n if (Gold_se == TRUE & Gold_sp == FALSE) {\n if (rs.length != 1) {\n refstd_parameters = array(0, dim = c(rs.length, \n 4, 1), dimnames = list(1:rs.length, c(\"True Value\", \n paste(point_estimate, \"estimate\"), \"HPD lower\", \n \"HPD upper\"), \"C2\"))\n refstd_parameters[, 1, 1] <- true.C2\n refstd_parameters[, 2, 1] <- C2.est\n refstd_parameters[, 3, 1] <- C2.HPD[, 1]\n refstd_parameters[, 4, 1] <- C2.HPD[, 2]\n long = length(alpha[, 1])\n refstd_Parameters = array(0, c(long, rs.length, \n 1))\n refstd_Parameters[, , 1] <- C2\n if (print_plot == TRUE) {\n if (is.null(chain) == FALSE) {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n param = \"Specificity\"\n par(mfcol = c(5, 2))\n no_chains = length(chain)\n iter_chain = round((iter.num * nb_chains - \n (burn_in) * nb_chains)/Thin, 0)/no_chains\n longueur = 1:iter_chain\n for (i in 1:rs.length) {\n plot(x = longueur, y = refstd_Parameters[longueur, \n i, 1], type = \"n\", col = 1, ylab = paste(param, \n \" of reference standard \", i), xlab = \"iteration number\", \n main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n for (l in 1:length(chain)) {\n lines(x = longueur, y = refstd_Parameters[, \n i, 1], col = l)\n }\n }\n }\n else {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n param = \"Specificity\"\n par(mfcol = c(5, 2))\n longueur = 1:long\n for (i in 1:rs.length) {\n plot(x = longueur, y = refstd_Parameters[, \n i, 1], type = \"l\", col = \"grey\", ylab = paste(param, \n \" of reference standard \", i), xlab = \"iteration number\", \n main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n }\n }\n dev.off()\n file.png_RS2 = paste(\"RefStd density plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS2)#, paper = \"a4\", height = 20)\n param = \"Specificity\"\n par(mfcol = c(5, 2))\n longueur = 1:long\n for (i in 1:rs.length) {\n plot(density(refstd_Parameters[, i, 1]), \n lwd = 4, type = \"l\", col = \"grey\", main = paste(param, \n \" of reference standard \", i, \" \\n Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n }\n dev.off()\n }\n }\n else {\n refstd_parameters = 
matrix(0, ncol = 4, nrow = 1)\n rownames(refstd_parameters) = c(\"C2\")\n colnames(refstd_parameters) = c(\"True Value\", \n paste(point_estimate, \"estimate\"), \"HPD.low\", \n \"HPD.high\")\n refstd_parameters[1, 1] <- true.C2\n refstd_parameters[1, 2] <- C2.est\n refstd_parameters[1, 3] <- C2.HPD[1]\n refstd_parameters[1, 4] <- C2.HPD[2]\n long = length(THETA)\n refstd_Parameters = matrix(0, nrow = long, \n ncol = 1)\n colnames(refstd_Parameters) = c(\"C2\")\n refstd_Parameters[, 1] <- C2\n if (print_plot == TRUE) {\n if (is.null(chain) == FALSE) {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n param = \"Specificity of reference standard\"\n par(mfcol = c(5, 2))\n no_chains = length(chain)\n iter_chain = round((iter.num * nb_chains - \n (burn_in) * nb_chains)/Thin, 0)/no_chains\n longueur = 1:iter_chain\n plot(x = longueur, y = refstd_Parameters[longueur, \n 1], type = \"n\", col = 1, ylab = paste(param), \n xlab = \"iteration number\", main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n for (l in 1:length(chain)) {\n lines(x = longueur, y = refstd_Parameters[longueur + \n (iter_chain * (l - 1)), 1], col = l)\n }\n }\n else {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n param = \"Specificity of reference standard\"\n par(mfcol = c(5, 2))\n longueur = 1:long\n plot(x = longueur, y = refstd_Parameters[, \n 1], type = \"l\", col = \"grey\", ylab = paste(param), \n xlab = \"iteration number\", main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n }\n dev.off()\n file.png_RS2 = paste(\"RefStd density plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS2)#, paper = \"a4\", height = 20)\n param = \"Specificity\"\n par(mfcol = c(5, 2))\n longueur = 1:long\n plot(density(refstd_Parameters[, 1]), lwd = 4, \n type = \"l\", col = \"grey\", main = paste(param, \n \" of reference standard \\n Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * nb_chains)/Thin))\n dev.off()\n }\n }\n }\n else {\n if (Gold_sp == TRUE & Gold_se == FALSE) {\n if (rs.length != 1) {\n refstd_parameters = array(0, dim = c(rs.length, \n 4, 1), dimnames = list(1:rs.length, c(\"True Value\", \n paste(point_estimate, \"estimate\"), \"HPD lower\", \n \"HPD upper\"), \"S2\"))\n refstd_parameters[, 1, 1] <- true.S2\n refstd_parameters[, 2, 1] <- S2.est\n refstd_parameters[, 3, 1] <- S2.HPD[, 1]\n refstd_parameters[, 4, 1] <- S2.HPD[, 2]\n long = length(alpha[, 1])\n refstd_Parameters = array(0, c(long, rs.length, \n 1))\n refstd_Parameters[, , 1] <- S2\n if (print_plot == TRUE) {\n if (is.null(chain) == FALSE) {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n param = \"Sensitivity\"\n par(mfcol = c(5, 2))\n no_chains = length(chain)\n iter_chain = round((iter.num * nb_chains - \n (burn_in) * nb_chains)/Thin, 0)/no_chains\n longueur = 1:iter_chain\n for (i in 1:rs.length) {\n plot(x = longueur, y = 
refstd_Parameters[longueur, \n i, 1], type = \"n\", col = 1, ylab = paste(param, \n \" of reference standard \", i), xlab = \"iteration number\", \n main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n for (l in 1:length(chain)) {\n lines(x = longueur, y = refstd_Parameters[longueur + \n (iter_chain * (l - 1)), i, 1], \n col = l)\n }\n }\n }\n else {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n param = \"Sensitivity\"\n par(mfcol = c(5, 2))\n longueur = 1:long\n for (i in 1:rs.length) {\n plot(x = longueur, y = refstd_Parameters[, \n i, 1], type = \"l\", col = \"grey\", \n ylab = paste(param, \" of reference standard \", \n i), xlab = \"iteration number\", \n main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n }\n }\n dev.off()\n file.png_RS2 = paste(\"RefStd density plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS2)#, paper = \"a4\", height = 20)\n param = \"Sensitivity\"\n par(mfcol = c(5, 2))\n longueur = 1:long\n for (i in 1:rs.length) {\n plot(density(refstd_Parameters[, i, 1]), \n lwd = 4, type = \"l\", col = \"grey\", \n main = paste(param, \" of reference standard \", \n i, \" \\n Thinning interval = \", thin.interval, \n \"\\n Total samplesize kept = \", (iter.num * \n nb_chains - (burn_in) * nb_chains)/Thin))\n }\n dev.off()\n }\n }\n else {\n refstd_parameters = matrix(0, ncol = 4, nrow = 1)\n rownames(refstd_parameters) = c(\"S2\")\n colnames(refstd_parameters) = c(\"True Value\", \n paste(point_estimate, \"estimate\"), \"HPD.low\", \n \"HPD.high\")\n refstd_parameters[1, 1] <- true.S2\n refstd_parameters[1, 2] <- S2.est\n refstd_parameters[1, 3] <- S2.HPD[1]\n refstd_parameters[1, 4] <- S2.HPD[2]\n long = length(THETA)\n refstd_Parameters = matrix(0, nrow = long, \n ncol = 1)\n colnames(refstd_Parameters) = c(\"S2\")\n refstd_Parameters[, 1] <- S2\n if (print_plot == TRUE) {\n if (is.null(chain) == FALSE) {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n param = \"Sensitivity of reference standard\"\n par(mfcol = c(5, 2))\n no_chains = length(chain)\n iter_chain = round((iter.num * nb_chains - \n (burn_in) * nb_chains)/Thin, 0)/no_chains\n longueur = 1:iter_chain\n plot(x = longueur, y = refstd_Parameters[longueur, \n 1], type = \"n\", col = 1, ylab = paste(param), \n xlab = \"iteration number\", main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n for (l in 1:length(chain)) {\n lines(x = longueur, y = refstd_Parameters[longueur + \n (iter_chain * (l - 1)), 1], col = l)\n }\n }\n else {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n param = \"Sensitivity of reference standard\"\n par(mfcol = c(5, 2))\n longueur = 1:long\n plot(x = longueur, y = refstd_Parameters[, \n 1], type = \"l\", col = \"grey\", ylab = paste(param), \n xlab = \"iteration number\", main = paste(\"Thinning interval = \", \n thin.interval, 
\"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n }\n dev.off()\n file.png_RS2 = paste(\"RefStd density plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS2)#, paper = \"a4\", height = 20)\n param = \"Sensitivity of reference standard\"\n par(mfcol = c(5, 2))\n longueur = 1:long\n plot(density(refstd_Parameters[, 1]), lwd = 4, \n type = \"l\", col = \"grey\", main = paste(param, \n \" \\n Thinning interval = \", thin.interval, \n \"\\n Total samplesize kept = \", (iter.num * \n nb_chains - (burn_in) * nb_chains)/Thin))\n dev.off()\n }\n }\n }\n else {\n if (rs.length != 1) {\n refstd_parameters = array(0, dim = c(rs.length, \n 4, 2), dimnames = list(1:rs.length, c(\"True Value\", \n paste(point_estimate, \"estimate\"), \"HPD lower\", \n \"HPD upper\"), c(\"S2\", \"C2\")))\n refstd_parameters[, 1, 1] <- true.S2\n refstd_parameters[, 2, 1] <- S2.est\n refstd_parameters[, 3, 1] <- S2.HPD[, 1]\n refstd_parameters[, 4, 1] <- S2.HPD[, 2]\n refstd_parameters[, 1, 2] <- true.C2\n refstd_parameters[, 2, 2] <- C2.est\n refstd_parameters[, 3, 2] <- C2.HPD[, 1]\n refstd_parameters[, 4, 2] <- C2.HPD[, 2]\n long = length(alpha[, 1])\n refstd_Parameters = array(0, c(long, rs.length, \n 2))\n refstd_Parameters[, , 1] <- S2\n refstd_Parameters[, , 2] <- C2\n if (print_plot == TRUE) {\n if (is.null(chain) == FALSE) {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n param = c(\"Sensitivity\", \"Specificity\")\n par(mfcol = c(5, 2))\n no_chains = length(chain)\n iter_chain = round((iter.num * nb_chains - \n (burn_in) * nb_chains)/Thin, 0)/no_chains\n longueur = 1:iter_chain\n for (j in 1:2) {\n for (i in 1:rs.length) {\n plot(x = longueur, y = refstd_Parameters[longueur, \n i, j], type = \"n\", col = 1, ylab = paste(param[j], \n \" of reference standard \", i), \n xlab = \"iteration number\", main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n for (l in 1:length(chain)) {\n lines(x = longueur, y = refstd_Parameters[longueur + \n (iter_chain * (l - 1)), i, j], \n col = l)\n }\n }\n }\n }\n else {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n param = c(\"Sensitivity\", \"Specificity\")\n par(mfcol = c(5, 2))\n for (j in 1:2) {\n longueur = 1:long\n for (i in 1:rs.length) {\n plot(x = longueur, y = refstd_Parameters[, \n i, j], type = \"l\", col = \"grey\", \n ylab = paste(param[j], \" of reference standard \", \n i), xlab = \"iteration number\", \n main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n }\n }\n }\n dev.off()\n file.png_RS2 = paste(\"RefStd density plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS2)#, paper = \"a4\", height = 20)\n param = c(\"Sensitivity\", \"Specificity\")\n par(mfcol = c(5, 2))\n longueur = 1:long\n for (j in 1:2) {\n for (i in 1:rs.length) {\n plot(density(refstd_Parameters[, i, \n j]), lwd = 4, type = \"l\", col = \"grey\", \n main = paste(param[j], \" of reference standard \", \n i, \" \\n Thinning interval = \", \n thin.interval, \"\\n 
Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n }\n }\n dev.off()\n }\n }\n else {\n refstd_parameters = matrix(0, ncol = 4, nrow = 2)\n rownames(refstd_parameters) = c(\"S2\", \"C2\")\n colnames(refstd_parameters) = c(\"True Value\", \n paste(point_estimate, \"estimate\"), \"HPD.low\", \n \"HPD.high\")\n refstd_parameters[1, 1] <- true.S2\n refstd_parameters[1, 2] <- S2.est\n refstd_parameters[1, 3] <- S2.HPD[1]\n refstd_parameters[1, 4] <- S2.HPD[2]\n refstd_parameters[2, 1] <- true.C2\n refstd_parameters[2, 2] <- C2.est\n refstd_parameters[2, 3] <- C2.HPD[1]\n refstd_parameters[2, 4] <- C2.HPD[2]\n long = length(THETA)\n refstd_Parameters = matrix(0, nrow = long, \n ncol = 2)\n colnames(refstd_Parameters) = c(\"S2\", \"C2\")\n refstd_Parameters[, 1] <- S2\n refstd_Parameters[, 2] <- C2\n if (print_plot == TRUE) {\n if (is.null(chain) == FALSE) {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n no_chains = length(chain)\n iter_chain = round((iter.num * nb_chains - \n (burn_in) * nb_chains)/Thin, 0)/no_chains\n longueur = 1:iter_chain\n param = c(\"Sensitivity of reference standard\", \n \"Specificity of reference standard\")\n par(mfcol = c(5, 2))\n for (j in 1:2) {\n plot(x = longueur, y = refstd_Parameters[longueur, \n j], type = \"n\", col = 1, ylab = paste(param[j]), \n xlab = \"iteration number\", main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n for (l in 1:length(chain)) {\n lines(x = longueur, y = refstd_Parameters[longueur + \n (iter_chain * (l - 1)), j], col = l)\n }\n }\n }\n else {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n Param = c(\"Sensitivity of reference standard\", \n \"Specificity of reference standard\")\n par(mfcol = c(5, 2))\n for (j in 1:2) {\n longueur = 1:long\n plot(x = longueur, y = refstd_Parameters[, \n j], type = \"l\", col = \"grey\", ylab = paste(Param[j]), \n xlab = \"iteration number\", main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n }\n }\n dev.off()\n file.png_RS2 = paste(\"RefStd density plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS2)#, paper = \"a4\", height = 20)\n param = c(\"Sensitivity of reference standard\", \n \"Specificity of reference standard\")\n par(mfcol = c(5, 2))\n for (j in 1:2) {\n longueur = 1:long\n plot(density(refstd_Parameters[, j]), \n lwd = 4, type = \"l\", col = \"grey\", \n main = paste(param[j], \" \\n Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n }\n dev.off()\n }\n }\n }\n }\n }\n else {\n if (condInd == TRUE & Gold_Std == FALSE & model == \n 2) {\n if (rs.length != 1) {\n refstd_parameters = array(0, dim = c(rs.length, \n 4, 4), dimnames = list(1:rs.length, c(\"True Value\", \n paste(point_estimate, \"estimate\"), \"HPD lower\", \n \"HPD upper\"), c(\"S2\", \"C2\", \"a1\", \"a0\")))\n refstd_parameters[, 1, 1] <- true.S2\n refstd_parameters[, 2, 1] <- S2.est\n refstd_parameters[, 3, 1] <- S2.HPD[, 1]\n refstd_parameters[, 4, 1] <- S2.HPD[, 2]\n 
refstd_parameters[, 1, 2] <- true.C2\n refstd_parameters[, 2, 2] <- C2.est\n refstd_parameters[, 3, 2] <- C2.HPD[, 1]\n refstd_parameters[, 4, 2] <- C2.HPD[, 2]\n refstd_parameters[, 1, 3] <- true.a1\n refstd_parameters[, 2, 3] <- a1.est\n refstd_parameters[, 3, 3] <- a1.HPD[, 1]\n refstd_parameters[, 4, 3] <- a1.HPD[, 2]\n refstd_parameters[, 1, 4] <- true.a0\n refstd_parameters[, 2, 4] <- a0.est\n refstd_parameters[, 3, 4] <- a0.HPD[, 1]\n refstd_parameters[, 4, 4] <- a0.HPD[, 2]\n long = length(alpha[, 1])\n refstd_Parameters = array(0, c(long, rs.length, \n 4))\n refstd_Parameters[, , 1] <- S2\n refstd_Parameters[, , 2] <- C2\n refstd_Parameters[, , 3] <- a1\n refstd_Parameters[, , 4] <- a0\n if (print_plot == TRUE) {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n param = c(\"Sensitivity\", \"Specificity\", \"a1\", \n \"a0\")\n par(mfcol = c(5, 2))\n for (j in 1:4) {\n longueur = 1:long\n for (i in 1:rs.length) {\n plot(x = longueur, y = refstd_Parameters[, \n i, j], type = \"l\", col = \"grey\", ylab = paste(param[j], \n \" of reference standard \", i), xlab = \"iteration number\", \n main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n abline(a = refstd_parameters[i, 2, j], \n b = 0, col = \"black\", lwd = 3)\n abline(a = refstd_parameters[i, 1, j], \n b = 0, col = \"red\", lwd = 3)\n abline(a = refstd_parameters[i, 3, j], \n b = 0, col = \"green\", lwd = 3)\n abline(a = refstd_parameters[i, 4, j], \n b = 0, col = \"green\", lwd = 3)\n }\n }\n dev.off()\n file.png_RS2 = paste(\"RefStd density plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS2)#, paper = \"a4\", height = 20)\n param = c(\"Sensitivity\", \"Specificity\", \"a1\", \n \"a0\")\n par(mfcol = c(5, 2))\n longueur = 1:long\n for (j in 1:4) {\n for (i in 1:rs.length) {\n plot(density(refstd_Parameters[, i, j]), \n lwd = 4, type = \"l\", col = \"grey\", \n main = paste(param[j], \" of reference standard \", \n i, \" \\n Thinning interval = \", thin.interval, \n \"\\n Total samplesize kept = \", (iter.num * \n nb_chains - (burn_in) * nb_chains)/Thin))\n }\n }\n dev.off()\n }\n }\n else {\n refstd_parameters = matrix(0, ncol = 4, nrow = 4)\n rownames(refstd_parameters) = c(\"S2\", \"C2\", \n \"a1\", \"a0\")\n colnames(refstd_parameters) = c(\"True Value\", \n paste(point_estimate, \"estimate\"), \"HPD.low\", \n \"HPD.high\")\n refstd_parameters[1, 1] <- true.S2\n refstd_parameters[1, 2] <- S2.est\n refstd_parameters[1, 3] <- S2.HPD[1]\n refstd_parameters[1, 4] <- S2.HPD[2]\n refstd_parameters[2, 1] <- true.C2\n refstd_parameters[2, 2] <- C2.est\n refstd_parameters[2, 3] <- C2.HPD[1]\n refstd_parameters[2, 4] <- C2.HPD[2]\n refstd_parameters[3, 1] <- true.a1\n refstd_parameters[3, 2] <- a1.est\n refstd_parameters[3, 3] <- a1.HPD[1]\n refstd_parameters[3, 4] <- a1.HPD[2]\n refstd_parameters[4, 1] <- true.a0\n refstd_parameters[4, 2] <- a0.est\n refstd_parameters[4, 3] <- a0.HPD[1]\n refstd_parameters[4, 4] <- a0.HPD[2]\n long = length(THETA)\n refstd_Parameters = matrix(0, nrow = long, \n ncol = 4)\n colnames(refstd_Parameters) = c(\"S2\", \"C2\", \n \"a1\", \"a0\")\n refstd_Parameters[, 1] <- S2\n refstd_Parameters[, 2] <- C2\n refstd_Parameters[, 3] <- a1\n refstd_Parameters[, 4] <- a0\n if (print_plot == TRUE) {\n file.png_RS = 
paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n Param = c(\"Sensitivity of reference standard\", \n \"Specificity of reference standard\", \"a1\", \n \"a0\")\n par(mfcol = c(5, 2))\n for (j in 1:4) {\n longueur = 1:long\n plot(x = longueur, y = refstd_Parameters[, \n j], type = \"l\", col = \"grey\", ylab = paste(Param[j]), \n xlab = \"iteration number\", main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n abline(a = refstd_parameters[j, 2], b = 0, \n col = \"black\", lwd = 3)\n abline(a = refstd_parameters[j, 1], b = 0, \n col = \"red\", lwd = 3)\n abline(a = refstd_parameters[j, 3], b = 0, \n col = \"green\", lwd = 3)\n abline(a = refstd_parameters[j, 4], b = 0, \n col = \"green\", lwd = 3)\n }\n dev.off()\n file.png_RS2 = paste(\"RefStd density plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS2)#, paper = \"a4\", height = 20)\n param = c(\"Sensitivity of reference standard\", \n \"Specificity of reference standard\", \"a1\", \n \"a0\")\n par(mfcol = c(5, 2))\n for (j in 1:4) {\n longueur = 1:long\n plot(density(refstd_Parameters[, j]), lwd = 4, \n type = \"l\", col = \"grey\", main = paste(param[j], \n \" \\n Thinning interval = \", thin.interval, \n \"\\n Total samplesize kept = \", (iter.num * \n nb_chains - (burn_in) * nb_chains)/Thin))\n }\n dev.off()\n }\n }\n }\n }\n parameters = array(0, dim = c(N, 4, 5), dimnames = list(Num_study, \n c(\"True value\", paste(point_estimate, \"estimate\"), \n \"HPD lower\", \"HPD upper\"), c(\"theta\", \"alpha\", \n \"pi\", \"S1\", \"C1\")))\n parameters[, 1, 1] <- true.theta\n parameters[, 2, 1] <- theta.est\n parameters[, 3, 1] <- theta.HPD[, 1]\n parameters[, 4, 1] <- theta.HPD[, 2]\n parameters[, 1, 2] <- true.alpha\n parameters[, 2, 2] <- alpha.est\n parameters[, 3, 2] <- alpha.HPD[, 1]\n parameters[, 4, 2] <- alpha.HPD[, 2]\n parameters[, 1, 3] <- true.PI\n parameters[, 2, 3] <- PI.est\n parameters[, 3, 3] <- PI.HPD[, 1]\n parameters[, 4, 3] <- PI.HPD[, 2]\n parameters[, 1, 4] <- true.S1\n parameters[, 2, 4] <- S1.est\n parameters[, 3, 4] <- S1.HPD[, 1]\n parameters[, 4, 4] <- S1.HPD[, 2]\n parameters[, 1, 5] <- true.C1\n parameters[, 2, 5] <- C1.est\n parameters[, 3, 5] <- C1.HPD[, 1]\n parameters[, 4, 5] <- C1.HPD[, 2]\n long = length(alpha[, 1])\n Parameters = array(0, c(long, N, 5))\n Parameters[, , 1] <- theta\n Parameters[, , 2] <- alpha\n Parameters[, , 3] <- PI\n Parameters[, , 4] <- S1\n Parameters[, , 5] <- C1\n parameter = matrix(0, ncol = 4, nrow = 9)\n rownames(parameter) = c(\"THETA\", \"LAMBDA\", \"beta\", \"sigma.alpha\", \n \"sigma.theta\", \"S Overall\", \"C Overall\", \"S1_new\", \n \"C1_new\")\n colnames(parameter) = c(\"True.value\", paste(point_estimate, \n \"estimate\"), \"HPD.low\", \"HPD.high\")\n parameter[1, 1] <- true.THETA\n parameter[1, 2] <- THETA.est\n parameter[1, 3] <- THETA.HPD[1]\n parameter[1, 4] <- THETA.HPD[2]\n parameter[2, 1] <- true.LAMBDA\n parameter[2, 2] <- LAMBDA.est\n parameter[2, 3] <- LAMBDA.HPD[1]\n parameter[2, 4] <- LAMBDA.HPD[2]\n parameter[3, 1] <- true.beta\n parameter[3, 2] <- beta.est\n parameter[3, 3] <- beta.HPD[1]\n parameter[3, 4] <- beta.HPD[2]\n parameter[4, 1] <- true.sigma.alpha\n parameter[4, 2] <- sigma.alpha.est\n parameter[4, 3] <- sigma.alpha.HPD[1]\n parameter[4, 4] <- sigma.alpha.HPD[2]\n 
parameter[5, 1] <- true.sigma.theta\n parameter[5, 2] <- sigma.theta.est\n parameter[5, 3] <- sigma.theta.HPD[1]\n parameter[5, 4] <- sigma.theta.HPD[2]\n parameter[6, 1] <- true.S_overall\n parameter[6, 2] <- S_overall.est\n parameter[6, 3] <- S_overall.HPD[1]\n parameter[6, 4] <- S_overall.HPD[2]\n parameter[7, 1] <- true.C_overall\n parameter[7, 2] <- C_overall.est\n parameter[7, 3] <- C_overall.HPD[1]\n parameter[7, 4] <- C_overall.HPD[2]\n parameter[8, 1] <- S1_new.est\n parameter[8, 2] <- S1_new.est\n parameter[8, 3] <- S1_new.HPD[1]\n parameter[8, 4] <- S1_new.HPD[2]\n parameter[9, 1] <- C1_new.est\n parameter[9, 2] <- C1_new.est\n parameter[9, 3] <- C1_new.HPD[1]\n parameter[9, 4] <- C1_new.HPD[2]\n long = length(THETA)\n Parameter = matrix(0, nrow = long, ncol = 9)\n colnames(Parameter) = c(\"THETA\", \"LAMBDA\", \"beta\", \"sigma.alpha\", \n \"sigma.theta\", \"S overall\", \"C overall\", \"S1_new\", \n \"C1_new\")\n Parameter[, 1] <- THETA\n Parameter[, 2] <- LAMBDA\n Parameter[, 3] <- beta\n Parameter[, 4] <- sigma.alpha\n Parameter[, 5] <- sigma.theta\n Parameter[, 6] <- S_overall\n Parameter[, 7] <- C_overall\n Parameter[, 8] <- S1_new\n Parameter[, 9] <- C1_new\n if (print_plot == TRUE) {\n if (is.null(chain) == FALSE) {\n plot.name <- \"Trace_plots_for_N=\"\n file.png5 = paste(plot.name, round((iter.num * \n nb_chains - (burn_in) * nb_chains)/Thin, 0), \n \".png\", sep = \"\")\n png(file.png5, width=1440, height=1440, pointsize=18)#, paper = \"a4\", height = 20)\n param = c(\"theta\", \"alpha\", \"PI\", \"S1\", \"C1\")\n Param = c(\"Capital Theta\", \"Capital Lambda\", \n \"beta\", \"~sigma[alpha]\", \"~sigma[theta]\", \"Sensitivity (overall)\", \n \"Specificity (overall)\", \"Sensitivity (new)\", \"Specificity (new)\")\n no_chains = length(chain)\n iter_chain = round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0)/no_chains\n min_param = c(min(Parameters[, , 1]), min(Parameters[, \n , 2]), min(Parameters[, , 3]), min(Parameters[, \n , 4]), min(Parameters[, , 5]))\n max_param = c(max(Parameters[, , 1]), max(Parameters[, \n , 2]), max(Parameters[, , 3]), max(Parameters[, \n , 4]), max(Parameters[, , 5]))\n dlag = (max_param - min_param)/100\n range_param = numeric()\n for (j in 1:5) {\n range_param = cbind(range_param, seq(min_param[j] + \n dlag[j]/2, max_param[j] - dlag[j]/2, by = dlag[j]))\n }\n par(mfcol = c(5, 2))\n longueur = 1:iter_chain\n for (j in 1:5) {\n for (i in 1:N) {\n plot(x = longueur, y = Parameters[longueur, \n i, j], type = \"n\", col = 1, ylab = paste(param[j], \n \" of study \", i), xlab = \"iteration number\", \n main = paste(\"Thinning interval = \", thin.interval, \n \"\\n Total samplesize kept = \", (iter.num * \n nb_chains - (burn_in) * nb_chains)/Thin), \n ylim = range(range_param[, j]))\n for (l in 1:length(chain)) {\n lines(x = longueur, y = Parameters[longueur + \n (iter_chain * (l - 1)), i, j], col = l)\n }\n }\n }\n min_Param = c(min(Parameter[, 1]), min(Parameter[, \n 2]), min(Parameter[, 3]), min(Parameter[, 4]), \n min(Parameter[, 5]), min(Parameter[, 6]), min(Parameter[, \n 7]), min(Parameter[, 8]), min(Parameter[, \n 9]))\n max_Param = c(max(Parameter[, 1]), max(Parameter[, \n 2]), max(Parameter[, 3]), max(Parameter[, 4]), \n max(Parameter[, 5]), max(Parameter[, 6]), max(Parameter[, \n 7]), max(Parameter[, 8]), max(Parameter[, \n 9]))\n dlag = (max_Param - min_Param)/100\n range_Param = numeric()\n for (j in 1:9) {\n range_Param = cbind(range_Param, seq(min_Param[j] + \n dlag[j]/2, max_Param[j] - dlag[j]/2, by = dlag[j]))\n }\n 
for (j in 1:9) {\n plot(x = longueur, y = Parameter[longueur, \n j], type = \"n\", col = 1, ylab = paste(Param[j]), \n xlab = \"iteration number\", main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * nb_chains)/Thin), \n ylim = range(range_Param[, j]))\n for (l in 1:length(chain)) {\n lines(x = longueur, y = Parameter[longueur + \n (iter_chain * (l - 1)), j], col = l)\n }\n }\n dev.off()\n image.list$trace_plots <- c(file.png5, \"\")\n }\n else {\n plot.name <- \"Trace_plots_for_N=\"\n file.png2 = paste(plot.name, round((iter.num * \n nb_chains - (burn_in) * nb_chains)/Thin, 0), \n \".png\", sep=\"\")\n png(file.png2, width=1440, height=1440, pointsize=18)#, paper = \"a4\", height = 20)\n param = c(\"theta\", \"alpha\", \"PI\", \"S1\", \"C1\")\n Param = c(\"Capital Theta\", \"Capital Lambda\", \n \"beta\", \"~sigma[alpha]\", \"~sigma[theta]\", \"Sensitivity (summary)\", \n \"Specificity (summary)\", \"Sensitivity (new)\", \"Specificity (new)\")\n min_param = c(min(Parameters[, , 1]), min(Parameters[, \n , 2]), min(Parameters[, , 3]), min(Parameters[, \n , 4]), min(Parameters[, , 5]))\n max_param = c(max(Parameters[, , 1]), max(Parameters[, \n , 2]), max(Parameters[, , 3]), max(Parameters[, \n , 4]), max(Parameters[, , 5]))\n dlag = (max_param - min_param)/100\n range_param = numeric()\n for (j in 1:5) {\n range_param = cbind(range_param, seq(min_param[j] + \n dlag[j]/2, max_param[j] - dlag[j]/2, by = dlag[j]))\n }\n par(mfcol = c(5, 2))\n longueur = 1:long\n for (j in 1:5) {\n for (i in 1:N) {\n plot(x = longueur, y = Parameters[, i, j], \n type = \"l\", col = \"grey\", ylab = paste(param[j], \n \" of study \", i), xlab = \"iteration number\", \n main = paste(\"Thinning interval = \", thin.interval, \n \"\\n Total samplesize kept = \", (iter.num * \n nb_chains - (burn_in) * nb_chains)/Thin), \n ylim = range(range_param[, j]))\n abline(a = parameters[i, 3, j], b = 0, col = \"green\", \n lwd = 3)\n abline(a = parameters[i, 4, j], b = 0, col = \"green\", \n lwd = 3)\n }\n }\n min_Param = c(min(Parameter[, 1]), min(Parameter[, \n 2]), min(Parameter[, 3]), min(Parameter[, 4]), \n min(Parameter[, 5]), min(Parameter[, 6]), min(Parameter[, \n 7]), min(Parameter[, 8]), min(Parameter[, \n 9]))\n max_Param = c(max(Parameter[, 1]), max(Parameter[, \n 2]), max(Parameter[, 3]), max(Parameter[, 4]), \n max(Parameter[, 5]), max(Parameter[, 6]), max(Parameter[, \n 7]), max(Parameter[, 8]), max(Parameter[, \n 9]))\n dlag = (max_Param - min_Param)/100\n range_Param = numeric()\n for (j in 1:9) {\n range_Param = cbind(range_Param, seq(min_Param[j] + \n dlag[j]/2, max_Param[j] - dlag[j]/2, by = dlag[j]))\n }\n for (j in 1:9) {\n plot(x = longueur, y = Parameter[, j], type = \"l\", \n col = \"grey\", ylab = paste(Param[j]), xlab = \"iteration number\", \n main = paste(\"Thinning interval = \", thin.interval, \n \"\\n Total samplesize kept = \", (iter.num * \n nb_chains - (burn_in) * nb_chains)/Thin), \n ylim = range(range_Param[, j]))\n abline(a = parameter[j, 3], b = 0, col = \"green\", \n lwd = 3)\n abline(a = parameter[j, 4], b = 0, col = \"green\", \n lwd = 3)\n }\n dev.off()\n image.list$trace_plots <- c(paste(plot.name, \".png\", sep = \"\"), \"\")\n }\n dens.plot.name <- \"Density_plots_for_N3=\"\n file.png3 = paste(dens.plot.name, round((iter.num * \n nb_chains - (burn_in) * nb_chains)/Thin, 0), \n \".png\", sep=\"\")\n png(file.png3, width=1440, height=1440)#, paper = \"a4\", height = 20)\n param = c(\"theta\", \"alpha\", 
\"PI\", \"S1\", \"C1\")\n Param = c(\"Capital Theta\", \"Capital Lambda\", \"beta\", \n \"~sigma[alpha]\", \"~sigma[theta]\", \"S Overall\", \n \"C Overall\", \"S1_new\", \"C1_new\")\n par(mfcol = c(5, 2))\n longueur = 1:long\n for (j in 1:5) {\n for (i in 1:N) {\n plot(density(Parameters[, i, j]), lwd = 4, \n type = \"l\", col = \"grey\", main = paste(param[j], \n \" of study \", i, \" \\n Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * nb_chains)/Thin))\n }\n }\n for (j in 1:9) {\n plot(density(Parameter[, j]), lwd = 4, type = \"l\", \n col = \"grey\", main = paste(Param[j], \" \\n Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * nb_chains)/Thin))\n }\n dev.off()\n #pdf(\"Summary ROC curve.pdf\")\n # possibly the worse variable naming of all time.\n png(\"Summary_ROC_curve.png\")\n #make.sroc.plot(Parameters, Parameter, data)\n default.x = range(1, 0)\n default.y = range(0, 1)\n plot(x = default.x, y = default.y, type = \"n\", xlim = rev(range(default.x)), \n xlab = \"\", ylab = \"\")\n title(xlab = \"Specificity\", ylab = \"Sensitivity\", \n cex.lab = 1.5, main = \"Summary ROC curve\")\n Sensi1 = apply(as.matrix(Parameters[, , 4]), 2, median)\n Speci1 = apply(as.matrix(Parameters[, , 5]), 2, median)\n Scale_factor = 10\n symbols(Speci1, Sensi1, circles = rowSums(as.matrix(data[[1]])), \n inches = 0.1 * Scale_factor/7, add = TRUE)\n Ov_Se = 1 - pnorm((median(Parameter[, 1]) - median(Parameter[, \n 2])/2)/exp(median(Parameter[, 3])/2))\n Ov_Sp = pnorm((median(Parameter[, 1]) + median(Parameter[, \n 2])/2)/exp(-median(Parameter[, 3])/2))\n points(Ov_Sp, Ov_Se, pch = 19, cex = 2)\n thet = qnorm((1 - as.matrix(Parameters[, , 4])) + \n 1e-14) * exp(Parameter[, 3]/2) + Parameter[, \n 2]/2\n min_TH = quantile(thet, 0.025)\n max_TH = quantile(thet, 0.975)\n dTH = 5e-05\n TH_range = seq(min_TH + dTH/2, max_TH - dTH/2, dTH)\n S_sroc = 1 - pnorm((TH_range - median(Parameter[, \n 2])/2)/exp(median(Parameter[, 3])/2))\n C_sroc = pnorm((TH_range + median(Parameter[, 2])/2)/exp(-median(Parameter[, \n 3])/2))\n lines(C_sroc, S_sroc, lwd = 3, col = \"black\", lty = 1)\n dev.off()\n\n \n }\n }\n else {\n if (real_life == TRUE) {\n d = as.matrix(data[[1]])\n Sample.size = d[, 1] + d[, 2] + d[, 3] + d[, 4]\n pp = d[, 1]\n pn = d[, 2]\n np = d[, 3]\n nn = d[, 4]\n test.file = paste(\"Summary for N =\", round((iter.num * \n nb_chains - (burn_in) * nb_chains)/Thin, 0), \n \".txt\")\n write(paste(\"Number of chains =\", nb_chains), file = test.file, \n append = TRUE)\n write(paste(\"Number of iteration within a chain =\", \n iter.num, \" Burn in within each chain =\", \n burn_in), file = test.file, append = TRUE)\n write(paste(\"Thinning interval =\", Thin), file = test.file, \n append = TRUE)\n write(paste(\"Total number of iteration kept =\", round((iter.num * \n nb_chains - (burn_in) * nb_chains)/Thin, 0)), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"File location : \", path), file = test.file, \n append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Date :\", Sys.time()), file = test.file, \n append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n if (Gold_Std == TRUE) {\n write(\"Perfect reference standard\", file = test.file, \n append = TRUE)\n }\n else {\n 
write(\"Imperfect reference standard\", file = test.file, \n append = TRUE)\n }\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\tSAMPLE SIZE \\t \"), file = test.file, \n append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\" Total ++ +- -+ --\"), file = test.file, \n append = TRUE)\n for (i in 1:N) {\n write(paste(\"Study \", i, \"\", Sample.size[i], \n \"\", pp[i], \"\", pn[i], \"\", np[i], \"\", nn[i]), \n file = test.file, append = TRUE)\n }\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\tPRIOR INFORMATION \\t \"), file = test.file, \n append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Prior of prevalence (pi) is \", prior_dist_PI, \n \"(\", round(alpha.PI, digits = 4), \",\", round(beta.PI, \n digits = 4), \"), <=> pi in [\", low.pi, \",\", \n up.pi, \"]\"), file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Prior of beta is Uniform(\", round(beta.a, \n 4), \",\", round(beta.b, 4), \")\"), file = test.file, \n append = TRUE)\n write(paste(\"Prior of THETA is Uniform(\", prior.THETA.lower, \n \",\", prior.THETA.upper, \")\"), file = test.file, \n append = TRUE)\n write(paste(\"Prior of LAMBDA is Uniform(\", prior.LAMBDA.lower, \n \",\", prior.LAMBDA.upper, \")\"), file = test.file, \n append = TRUE)\n if (prior_sig_a == 1) {\n write(paste(\"Prior of sigma_alpha is uniform(\", \n l.disp.alpha, \",\", u.disp.alpha, \")\"), file = test.file, \n append = TRUE)\n }\n else {\n if (prior_sig_a == 2) {\n write(paste(\"Prior of sigma_alpha^2 is uniform(\", \n l.disp.alpha, \",\", u.disp.alpha, \")\"), file = test.file, \n append = TRUE)\n }\n else {\n if (prior_sig_a == 3) {\n write(paste(\"Prior of precision of sigma_alpha is gamma(\", \n l.disp.alpha, \",\", u.disp.alpha, \")\"), \n file = test.file, append = TRUE)\n }\n }\n }\n if (prior_sig_t == 1) {\n write(paste(\"Prior of sigma_theta is uniform(\", \n l.disp.theta, \",\", u.disp.theta, \")\"), file = test.file, \n append = TRUE)\n }\n else {\n if (prior_sig_t == 2) {\n write(paste(\"Prior of sigma_theta^2 is uniform(\", \n l.disp.theta, \",\", u.disp.theta, \")\"), file = test.file, \n append = TRUE)\n }\n else {\n if (prior_sig_t == 3) {\n write(paste(\"Prior of precision of sigma_theta is gamma(\", \n l.disp.theta, \",\", u.disp.theta, \")\"), \n file = test.file, append = TRUE)\n }\n }\n }\n if (condInd == TRUE & Gold_Std == FALSE & model == \n 1) {\n write(paste(\"\"), file = test.file, append = TRUE)\n if (Gold_se == TRUE & Gold_sp == FALSE) {\n write(paste(\"Prior of S2 (Sensitivity of reference test) is \"), \n file = test.file, append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"Study(ies) \", sub_rs[[i + 1]][1], \n \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"assumed to be perfect.\"), file = test.file, \n append = TRUE)\n }\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Prior of C2 
(Specificity of reference test) is \"), \n file = test.file, append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"Study(ies) \", sub_rs[[i + 1]][1], \n \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", prior_dist_C2, \"(\", round(Spec2.alpha[i], \n digits = 4), \",\", round(Spec2.beta[i], \n digits = 4), \"), <=> C2 in [\", low.sp[i], \n \",\", up.sp[i], \"]\"), file = test.file, \n append = TRUE)\n }\n }\n else {\n if (Gold_sp == TRUE & Gold_se == FALSE) {\n write(paste(\"Prior of S2 (Sensitivity of reference test) is \"), \n file = test.file, append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", prior_dist_S2, \"(\", round(Sens2.alpha[i], \n digits = 4), \",\", round(Sens2.beta[i], \n digits = 4), \"), <=> S2 in [\", low.se[i], \n \",\", up.se[i], \"]\"), file = test.file, \n append = TRUE)\n }\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Prior of C2 (Specificity of reference test) is \"), \n file = test.file, append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"assumed to be perfect.\"), file = test.file, \n append = TRUE)\n }\n }\n else {\n write(paste(\"Prior of S2 (Sensitivity of reference test) is \"), \n file = test.file, append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", prior_dist_S2, \"(\", round(Sens2.alpha[i], \n digits = 4), \",\", round(Sens2.beta[i], \n digits = 4), \"), <=> S2 in [\", low.se[i], \n \",\", up.se[i], \"]\"), file = test.file, \n append = TRUE)\n }\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Prior of C2 (Specificity of reference test) is \"), \n file = test.file, append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", prior_dist_C2, \"(\", round(Spec2.alpha[i], \n digits = 4), \",\", round(Spec2.beta[i], \n digits = 4), \"), <=> C2 in [\", low.sp[i], \n \",\", up.sp[i], \"]\"), file = test.file, \n append = TRUE)\n }\n }\n }\n }\n else {\n if (condInd == TRUE & Gold_Std == FALSE & model == \n 2) {\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Prior of a1 is \"), file = test.file, \n append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"Study(ies) \", sub_rs[[i + 1]][1], \n \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"Normal (\", round(mean.a1[i], \n digits = 4), \",\", round(sd.a1[i], digits = 4), \n \"), <=> S2 in []\"), file = test.file, append = TRUE)\n }\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Prior of a0 is \"), file = test.file, \n append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"Study(ies) \", sub_rs[[i + 1]][1], \n \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"Normal (\", round(mean.a0[i], \n digits = 4), \",\", round(sd.a0[i], digits = 4), \n \"), <=> C2 in []\"), file = test.file, append = TRUE)\n }\n }\n else {\n if (condInd == FALSE) {\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Prior of a1 is \"), file = test.file, \n append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"Normal (\", round(mean.a1[i], \n digits = 4), \",\", round(sd.a1[i], digits = 4), \n \"), <=> S2 in []\"), file = 
test.file, \n append = TRUE)\n }\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Prior of a0 is \"), file = test.file, \n append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"Normal (\", round(mean.a0[i], \n digits = 4), \",\", round(sd.a0[i], digits = 4), \n \"), <=> C2 in []\"), file = test.file, \n append = TRUE)\n }\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Prior of b1 is \"), file = test.file, \n append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"Uniform (\", round(low.b1[i], \n digits = 4), \",\", round(up.b1[i], digits = 4), \n \"), <=> S2 in []\"), file = test.file, \n append = TRUE)\n }\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Prior of b0 is \"), file = test.file, \n append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"Uniform (\", round(low.b0[i], \n digits = 4), \",\", round(up.b0[i], digits = 4), \n \"), <=> C2 in []\"), file = test.file, \n append = TRUE)\n }\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Prior of d1 is Uniform(\", round(low.d1, \n 4), \",\", round(up.d1, 4), \")\"), file = test.file, \n append = TRUE)\n write(paste(\"Prior of d0 is Uniform(\", round(low.d0, \n 4), \",\", round(up.d0, 4), \")\"), file = test.file, \n append = TRUE)\n }\n }\n }\n write(paste(), file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\tBETWEEN_STUDY parameters (Point estimate =\", \n point_estimate, \")\\t \"), file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\" Estimate Standard_Dev MC_error C.I._lower C.I._upper\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"THETA \", round(THETA.est, digits = digit), \n \"\", round(THETA.sd, digits = digit), \"\", round(THETA.MCerror, \n digits = digit), \"\", round(THETA.HPD[1], digits = digit), \n \"\", round(THETA.HPD[2], digits = digit)), file = test.file, \n append = TRUE)\n write(paste(\"LAMBDA \", round(LAMBDA.est, digits = digit), \n \"\", round(LAMBDA.sd, digits = digit), \"\", round(LAMBDA.MCerror, \n digits = digit), \"\", round(LAMBDA.HPD[1], digits = digit), \n \"\", round(LAMBDA.HPD[2], digits = digit)), file = test.file, \n append = TRUE)\n write(paste(\"beta \", round(beta.est, digits = digit), \n \"\", round(beta.sd, digits = digit), \"\", round(beta.MCerror, \n digits = digit), \"\", round(beta.HPD[1], digits = digit), \n \"\", round(beta.HPD[2], digits = digit)), file = test.file, \n append = TRUE)\n write(paste(\"sigma.alpha \", round(sigma.alpha.est, \n digits = digit), \"\", round(sigma.alpha.sd, digits = digit), \n \"\", round(sigma.alpha.MCerror, digits = digit), \n \"\", round(sigma.alpha.HPD[1], digits = digit), \n \"\", round(sigma.alpha.HPD[2], digits = digit)), \n file = test.file, 
append = TRUE)\n write(paste(\"sigma.theta \", round(sigma.theta.est, \n digits = digit), \"\", round(sigma.theta.sd, digits = digit), \n \"\", round(sigma.theta.MCerror, digits = digit), \n \"\", round(sigma.theta.HPD[1], digits = digit), \n \"\", round(sigma.theta.HPD[2], digits = digit)), \n file = test.file, append = TRUE)\n write(paste(\"S overall \", round(S_overall.est, \n digits = digit), \"\", round(S_overall.sd, digits = digit), \n \"\", round(S_overall.MCerror, digits = digit), \n \"\", round(S_overall.HPD[1], digits = digit), \n \"\", round(S_overall.HPD[2], digits = digit)), \n file = test.file, append = TRUE)\n write(paste(\"C overall \", round(C_overall.est, \n digits = digit), \"\", round(C_overall.sd, digits = digit), \n \"\", round(C_overall.MCerror, digits = digit), \n \"\", round(C_overall.HPD[1], digits = digit), \n \"\", round(C_overall.HPD[2], digits = digit)), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n if (condInd == TRUE & Gold_Std == FALSE & model == \n 1) {\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\tReference standard (Point estimate =\", \n point_estimate, \")\\t \"), file = test.file, \n append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\" Estimate Standard_Dev MC_error C.I._lower C.I._upper\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n if (Gold_se == TRUE & Gold_sp == FALSE) {\n if (rs.length != 1) {\n for (i in 1:rs.length) {\n write(paste(\"S2 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"(It was assumed to be perfect)\"), \n file = test.file, append = TRUE)\n write(paste(\"C2 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(C2.est[i], digits = digit), \n \"\", round(C2.sd[i], digits = digit), \n \"\", round(C2.MCerror[i], digits = digit), \n \"\", round(C2.HPD[i, 1], digits = digit), \n \"\", round(C2.HPD[i, 2], digits = digit)), \n file = test.file, append = TRUE)\n }\n }\n else {\n write(paste(\"S2 (It was assumed to be perfect)\"), \n file = test.file, append = TRUE)\n write(paste(\"C2 \", round(C2.est, digits = digit), \n \"\", round(C2.sd, digits = digit), \"\", round(C2.MCerror, \n digits = digit), \"\", round(C2.HPD[1], \n digits = digit), \"\", round(C2.HPD[2], \n digits = digit)), file = test.file, append = TRUE)\n }\n }\n else {\n if (Gold_sp == TRUE & Gold_se == FALSE) {\n if (rs.length != 1) {\n for (i in 1:rs.length) {\n write(paste(\"S2 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(S2.est[i], digits = digit), \n \"\", round(S2.sd[i], digits = digit), \n \"\", round(S2.MCerror[i], digits = digit), \n \"\", round(S2.HPD[i, 1], digits = digit), \n \"\", round(S2.HPD[i, 2], digits = digit)), \n file = test.file, append = TRUE)\n write(paste(\"C2 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"(It was assumed to be perfect)\"), \n file = test.file, append = TRUE)\n }\n }\n else {\n write(paste(\"S2 \", round(S2.est, \n digits = digit), \"\", round(S2.sd, digits = digit), \n \"\", round(S2.MCerror, digits = digit), \n \"\", round(S2.HPD[1], digits = digit), \n 
\"\", round(S2.HPD[2], digits = digit)), \n file = test.file, append = TRUE)\n write(paste(\"C2 (It was assumed to be perfect)\"), \n file = test.file, append = TRUE)\n }\n }\n else {\n if (rs.length != 1) {\n for (i in 1:rs.length) {\n write(paste(\"S2 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(S2.est[i], digits = digit), \n \"\", round(S2.sd[i], digits = digit), \n \"\", round(S2.MCerror[i], digits = digit), \n \"\", round(S2.HPD[i, 1], digits = digit), \n \"\", round(S2.HPD[i, 2], digits = digit)), \n file = test.file, append = TRUE)\n }\n for (i in 1:rs.length) {\n write(paste(\"C2 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(C2.est[i], digits = digit), \n \"\", round(C2.sd[i], digits = digit), \n \"\", round(C2.MCerror[i], digits = digit), \n \"\", round(C2.HPD[i, 1], digits = digit), \n \"\", round(C2.HPD[i, 2], digits = digit)), \n file = test.file, append = TRUE)\n }\n }\n else {\n write(paste(\"S2 \", round(S2.est, \n digits = digit), \"\", round(S2.sd, digits = digit), \n \"\", round(S2.MCerror, digits = digit), \n \"\", round(S2.HPD[1], digits = digit), \n \"\", round(S2.HPD[2], digits = digit)), \n file = test.file, append = TRUE)\n write(paste(\"C2 \", round(C2.est, \n digits = digit), \"\", round(C2.sd, digits = digit), \n \"\", round(C2.MCerror, digits = digit), \n \"\", round(C2.HPD[1], digits = digit), \n \"\", round(C2.HPD[2], digits = digit)), \n file = test.file, append = TRUE)\n }\n }\n }\n }\n else {\n if (condInd == TRUE & Gold_Std == FALSE & model == \n 2) {\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\tReference standard (Point estimate =\", \n point_estimate, \")\\t \"), file = test.file, \n append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\" Estimate Standard_Dev MC_error C.I._lower C.I._upper\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n if (rs.length != 1) {\n for (i in 1:rs.length) {\n write(paste(\"a1 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(a1.est[i], digits = digit), \n \"\", round(a1.sd[i], digits = digit), \n \"\", round(a1.MCerror[i], digits = digit), \n \"\", round(a1.HPD[i, 1], digits = digit), \n \"\", round(a1.HPD[i, 2], digits = digit)), \n file = test.file, append = TRUE)\n }\n for (i in 1:rs.length) {\n write(paste(\"a0 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(a0.est[i], digits = digit), \n \"\", round(a0.sd[i], digits = digit), \n \"\", round(a0.MCerror[i], digits = digit), \n \"\", round(a0.HPD[i, 1], digits = digit), \n \"\", round(a0.HPD[i, 2], digits = digit)), \n file = test.file, append = TRUE)\n }\n write(paste(\"\"), file = test.file, append = TRUE)\n for (i in 1:rs.length) {\n write(paste(\"S2 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(S2.est[i], digits = digit), \n \"\", round(S2.sd[i], digits = digit), \n \"\", round(S2.MCerror[i], digits = digit), \n \"\", round(S2.HPD[i, 1], digits = digit), \n \"\", round(S2.HPD[i, 2], digits = digit)), \n file = test.file, append = TRUE)\n }\n for (i in 
1:rs.length) {\n write(paste(\"C2 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(C2.est[i], digits = digit), \n \"\", round(C2.sd[i], digits = digit), \n \"\", round(C2.MCerror[i], digits = digit), \n \"\", round(C2.HPD[i, 1], digits = digit), \n \"\", round(C2.HPD[i, 2], digits = digit)), \n file = test.file, append = TRUE)\n }\n }\n else {\n write(paste(\"a1 \", round(a1.est, digits = digit), \n \"\", round(a1.sd, digits = digit), \"\", round(a1.MCerror, \n digits = digit), \"\", round(a1.HPD[1], \n digits = digit), \"\", round(a1.HPD[2], \n digits = digit)), file = test.file, append = TRUE)\n write(paste(\"a0 \", round(a0.est, digits = digit), \n \"\", round(a0.sd, digits = digit), \"\", round(a0.MCerror, \n digits = digit), \"\", round(a0.HPD[1], \n digits = digit), \"\", round(a0.HPD[2], \n digits = digit)), file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"S2 \", round(S2.est, digits = digit), \n \"\", round(S2.sd, digits = digit), \"\", round(S2.MCerror, \n digits = digit), \"\", round(S2.HPD[1], \n digits = digit), \"\", round(S2.HPD[2], \n digits = digit)), file = test.file, append = TRUE)\n write(paste(\"C2 \", round(C2.est, digits = digit), \n \"\", round(C2.sd, digits = digit), \"\", round(C2.MCerror, \n digits = digit), \"\", round(C2.HPD[1], \n digits = digit), \"\", round(C2.HPD[2], \n digits = digit)), file = test.file, append = TRUE)\n }\n }\n else {\n if (condInd == FALSE) {\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\tReference standard (Point estimate =\", \n point_estimate, \")\\t \"), file = test.file, \n append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\" Estimate Standard_Dev MC_error C.I._lower C.I._upper\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"d1 \", round(d1.est, digits = digit), \n \"\", round(d1.sd, digits = digit), \"\", round(d1.MCerror, \n digits = digit), \"\", round(d1.HPD[1], \n digits = digit), \"\", round(d1.HPD[2], \n digits = digit)), file = test.file, append = TRUE)\n write(paste(\"d0 \", round(d0.est, digits = digit), \n \"\", round(d0.sd, digits = digit), \"\", round(d0.MCerror, \n digits = digit), \"\", round(d0.HPD[1], \n digits = digit), \"\", round(d0.HPD[2], \n digits = digit)), file = test.file, append = TRUE)\n if (rs.length != 1) {\n for (i in 1:rs.length) {\n write(paste(\"a1 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(a1.est[i], digits = digit), \n \"\", round(a1.sd[i], digits = digit), \n \"\", round(a1.MCerror[i], digits = digit), \n \"\", round(a1.HPD[i, 1], digits = digit), \n \"\", round(a1.HPD[i, 2], digits = digit)), \n file = test.file, append = TRUE)\n }\n for (i in 1:rs.length) {\n write(paste(\"a0 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(a0.est[i], digits = digit), \n \"\", round(a0.sd[i], digits = digit), \n \"\", round(a0.MCerror[i], digits = digit), \n \"\", round(a0.HPD[i, 1], digits = digit), \n \"\", round(a0.HPD[i, 2], digits = digit)), \n file = test.file, append = TRUE)\n }\n write(paste(\"\"), file = test.file, append = TRUE)\n for (i 
in 1:rs.length) {\n write(paste(\"S2 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(S2.est[i], digits = digit), \n \"\", round(S2.sd[i], digits = digit), \n \"\", round(S2.MCerror[i], digits = digit), \n \"\", round(S2.HPD[i, 1], digits = digit), \n \"\", round(S2.HPD[i, 2], digits = digit)), \n file = test.file, append = TRUE)\n }\n for (i in 1:rs.length) {\n write(paste(\"C2 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(C2.est[i], digits = digit), \n \"\", round(C2.sd[i], digits = digit), \n \"\", round(C2.MCerror[i], digits = digit), \n \"\", round(C2.HPD[i, 1], digits = digit), \n \"\", round(C2.HPD[i, 2], digits = digit)), \n file = test.file, append = TRUE)\n }\n for (i in 1:rs.length) {\n write(paste(\"b1 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(b1.est[i], digits = digit), \n \"\", round(b1.sd[i], digits = digit), \n \"\", round(b1.MCerror[i], digits = digit), \n \"\", round(b1.HPD[i, 1], digits = digit), \n \"\", round(b1.HPD[i, 2], digits = digit)), \n file = test.file, append = TRUE)\n }\n for (i in 1:rs.length) {\n write(paste(\"b0 of Study(ies) \", sub_rs[[i + \n 1]][1], \"to\", sub_rs[[i + 1]][length(sub_rs[[i + \n 1]])], \"\", round(b0.est[i], digits = digit), \n \"\", round(b0.sd[i], digits = digit), \n \"\", round(b0.MCerror[i], digits = digit), \n \"\", round(b0.HPD[i, 1], digits = digit), \n \"\", round(b0.HPD[i, 2], digits = digit)), \n file = test.file, append = TRUE)\n }\n }\n else {\n write(paste(\"a1 \", round(a1.est, \n digits = digit), \"\", round(a1.sd, digits = digit), \n \"\", round(a1.MCerror, digits = digit), \n \"\", round(a1.HPD[1], digits = digit), \n \"\", round(a1.HPD[2], digits = digit)), \n file = test.file, append = TRUE)\n write(paste(\"a0 \", round(a0.est, \n digits = digit), \"\", round(a0.sd, digits = digit), \n \"\", round(a0.MCerror, digits = digit), \n \"\", round(a0.HPD[1], digits = digit), \n \"\", round(a0.HPD[2], digits = digit)), \n file = test.file, append = TRUE)\n write(paste(\"b1 \", round(b1.est, \n digits = digit), \"\", round(b1.sd, digits = digit), \n \"\", round(b1.MCerror, digits = digit), \n \"\", round(b1.HPD[1], digits = digit), \n \"\", round(b1.HPD[2], digits = digit)), \n file = test.file, append = TRUE)\n write(paste(\"b0 \", round(b0.est, \n digits = digit), \"\", round(b0.sd, digits = digit), \n \"\", round(b0.MCerror, digits = digit), \n \"\", round(b0.HPD[1], digits = digit), \n \"\", round(b0.HPD[2], digits = digit)), \n file = test.file, append = TRUE)\n write(paste(\"S2 \", round(S2.est, \n digits = digit), \"\", round(S2.sd, digits = digit), \n \"\", round(S2.MCerror, digits = digit), \n \"\", round(S2.HPD[1], digits = digit), \n \"\", round(S2.HPD[2], digits = digit)), \n file = test.file, append = TRUE)\n write(paste(\"C2 \", round(C2.est, \n digits = digit), \"\", round(C2.sd, digits = digit), \n \"\", round(C2.MCerror, digits = digit), \n \"\", round(C2.HPD[1], digits = digit), \n \"\", round(C2.HPD[2], digits = digit)), \n file = test.file, append = TRUE)\n }\n }\n }\n }\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\tWITHIN-STUDY PARAMETERS \\t \"), file = test.file, \n append = TRUE)\n 
write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\ttheta \\t \"), file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\" Estimate Standard_Dev MC_error C.I._lower C.I._upper\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n for (i in 1:N) {\n write(paste(\"Study \", i, \"\", round(theta.est[i], \n digits = digit), \"\", round(theta.sd[i], digits = digit), \n \"\", round(theta.MCerror[i], digits = digit), \n \"\", round(theta.HPD[i, 1], digits = digit), \n \"\", round(theta.HPD[i, 2], digits = digit)), \n file = test.file, append = TRUE)\n }\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\talpha \\t \"), file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\" Estimate Standard_Dev MC_error C.I._lower C.I._upper\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n for (i in 1:N) {\n write(paste(\"Study \", i, \"\", round(alpha.est[i], \n digits = digit), \"\", round(alpha.sd[i], digits = digit), \n \"\", round(alpha.MCerror[i], digits = digit), \n \"\", round(alpha.HPD[i, 1], digits = digit), \n \"\", round(alpha.HPD[i, 2], digits = digit)), \n file = test.file, append = TRUE)\n }\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\tPrevalence \\t \"), file = test.file, \n append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\" Estimate Standard_Dev MC_error C.I._lower C.I._upper\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n for (i in 1:N) {\n write(paste(\"Study \", i, \"\", round(PI.est[i], \n digits = digit), \"\", round(PI.sd[i], digits = digit), \n \"\", round(PI.MCerror[i], digits = digit), \"\", \n round(PI.HPD[i, 1], digits = digit), \"\", round(PI.HPD[i, \n 2], digits = digit)), file = test.file, append = TRUE)\n }\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\tSensitivity of test 1 (S1) \\t \"), \n file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\" Estimate Standard_Dev MC_error C.I._lower C.I._upper\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n for (i in 1:N) {\n write(paste(\"Study \", i, \"\", round(S1.est[i], \n digits = digit), \"\", round(S1.sd[i], digits = digit), \n \"\", round(S1.MCerror[i], digits = digit), \"\", \n round(S1.HPD[i, 1], digits = digit), \"\", round(S1.HPD[i, \n 2], digits = digit)), file = test.file, append = TRUE)\n }\n 
write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\tSpecificity of test 1 (C1) \\t \"), \n file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\" Estimate Standard_Dev MC_error C.I._lower C.I._upper\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n for (i in 1:N) {\n write(paste(\"Study \", i, \"\", round(C1.est[i], \n digits = digit), \"\", round(C1.sd[i], digits = digit), \n \"\", round(C1.MCerror[i], digits = digit), \"\", \n round(C1.HPD[i, 1], digits = digit), \"\", round(C1.HPD[i, \n 2], digits = digit)), file = test.file, append = TRUE)\n }\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\tPosterior predictive value of Sensitivity of test under evaluation (S1) \\t \"), \n file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\" Estimate Standard_Dev MC_error C.I._lower C.I._upper\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Sensitivity \", round(S1_new.est, digits = digit), \n \"\", round(S1_new.sd, digits = digit), \"\", round(S1_new.MCerror, \n digits = digit), \"\", round(S1_new.HPD[1], digits = digit), \n \"\", round(S1_new.HPD[2], digits = digit)), file = test.file, \n append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\\tPosterior predictive value of Specificity of test under evaluation (C1) \\t \"), \n file = test.file, append = TRUE)\n write(paste(\"______________________________________________________\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\" Estimate Standard_Dev MC_error C.I._lower C.I._upper\"), \n file = test.file, append = TRUE)\n write(paste(\"\"), file = test.file, append = TRUE)\n write(paste(\"Specificity\", round(C1_new.est, digits = digit), \n \"\", round(C1_new.sd, digits = digit), \"\", round(C1_new.MCerror, \n digits = digit), \"\", round(C1_new.HPD[1], digits = digit), \n \"\", round(C1_new.HPD[2], digits = digit)), file = test.file, \n append = TRUE)\n Num_study = c()\n for (i in 1:N) {\n Num_study = c(Num_study, paste(\"Study\", i))\n }\n if (condInd == TRUE & Gold_Std == FALSE & model == \n 1) {\n if (Gold_se == TRUE & Gold_sp == FALSE) {\n if (rs.length != 1) {\n refstd_parameters = array(0, dim = c(rs.length, \n 3, 1), dimnames = list(1:rs.length, c(paste(point_estimate, \n \"estimate\"), \"HPD lower\", \"HPD upper\"), \n \"C2\"))\n refstd_parameters[, 1, 1] <- C2.est\n refstd_parameters[, 2, 1] <- C2.HPD[, 1]\n refstd_parameters[, 3, 1] <- C2.HPD[, 2]\n long = length(alpha[, 1])\n refstd_Parameters = array(0, c(long, rs.length, \n 1))\n refstd_Parameters[, , 1] <- C2\n if (print_plot == TRUE) {\n if (is.null(chain) == FALSE) {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n param = \"Specificity\"\n par(mfcol = c(5, 2))\n no_chains = length(chain)\n iter_chain = round((iter.num * 
nb_chains - \n (burn_in) * nb_chains)/Thin, 0)/no_chains\n longueur = 1:iter_chain\n for (i in 1:rs.length) {\n plot(x = longueur, y = refstd_Parameters[longueur, \n i, 1], type = \"n\", col = 1, ylab = paste(param, \n \" of reference standard \", i), xlab = \"iteration number\", \n main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n for (l in 1:length(chain)) {\n lines(x = longueur, y = refstd_Parameters[longueur + \n (iter_chain * (l - 1)), i, 1], \n col = l)\n }\n }\n }\n else {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n param = \"Specificity\"\n par(mfcol = c(5, 2))\n longueur = 1:long\n for (i in 1:rs.length) {\n plot(x = longueur, y = refstd_Parameters[, \n i, 1], type = \"l\", col = \"grey\", \n ylab = paste(param, \" of reference standard \", \n i), xlab = \"iteration number\", \n main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n }\n }\n dev.off()\n file.png_RS2 = paste(\"RefStd density plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS2)#, paper = \"a4\", height = 20)\n param = \"Specificity\"\n par(mfcol = c(5, 2))\n longueur = 1:long\n for (i in 1:rs.length) {\n plot(density(refstd_Parameters[, i, 1]), \n lwd = 4, type = \"l\", col = \"grey\", \n main = paste(param, \" of reference standard \", \n i, \" \\n Thinning interval = \", thin.interval, \n \"\\n Total samplesize kept = \", (iter.num * \n nb_chains - (burn_in) * nb_chains)/Thin))\n }\n dev.off()\n }\n }\n else {\n refstd_parameters = matrix(0, ncol = 3, nrow = 1)\n rownames(refstd_parameters) = c(\"C2\")\n colnames(refstd_parameters) = c(paste(point_estimate, \n \"estimate\"), \"HPD.low\", \"HPD.high\")\n refstd_parameters[1, 1] <- C2.est\n refstd_parameters[1, 2] <- C2.HPD[1]\n refstd_parameters[1, 3] <- C2.HPD[2]\n long = length(THETA)\n refstd_Parameters = matrix(0, nrow = long, \n ncol = 1)\n colnames(refstd_Parameters) = c(\"C2\")\n refstd_Parameters[, 1] <- C2\n if (print_plot == TRUE) {\n if (is.null(chain) == FALSE) {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n param = c(\"Specificity of reference standard\")\n par(mfcol = c(5, 2))\n no_chains = length(chain)\n iter_chain = round((iter.num * nb_chains - \n (burn_in) * nb_chains)/Thin, 0)/no_chains\n longueur = 1:iter_chain\n plot(x = longueur, y = refstd_Parameters[longueur, \n 1], type = \"n\", col = 1, ylab = paste(param), \n xlab = \"iteration number\", main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n for (l in 1:length(chain)) {\n lines(x = longueur, y = refstd_Parameters[longueur + \n (iter_chain * (l - 1)), 1], col = l)\n }\n }\n else {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n param = c(\"Specificity of reference standard\")\n par(mfcol = c(5, 2))\n longueur = 1:long\n plot(x = longueur, y = refstd_Parameters[, \n 1], type = \"l\", col = \"grey\", ylab = paste(param), \n 
xlab = \"iteration number\", main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n }\n dev.off()\n file.png_RS2 = paste(\"RefStd density plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS2)#, paper = \"a4\", height = 20)\n param = c(\"Specificity\")\n par(mfcol = c(5, 2))\n longueur = 1:long\n plot(density(refstd_Parameters[, 1]), lwd = 4, \n type = \"l\", col = \"grey\", main = paste(param, \n \" of reference standard \\n Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n dev.off()\n }\n }\n }\n else {\n if (Gold_sp == TRUE & Gold_se == FALSE) {\n if (rs.length != 1) {\n refstd_parameters = array(0, dim = c(rs.length, \n 3, 1), dimnames = list(1:rs.length, c(paste(point_estimate, \n \"estimate\"), \"HPD lower\", \"HPD upper\"), \n \"S2\"))\n refstd_parameters[, 1, 1] <- S2.est\n refstd_parameters[, 2, 1] <- S2.HPD[, 1]\n refstd_parameters[, 3, 1] <- S2.HPD[, 2]\n long = length(alpha[, 1])\n refstd_Parameters = array(0, c(long, rs.length, \n 1))\n refstd_Parameters[, , 1] <- S2\n if (print_plot == TRUE) {\n if (is.null(chain) == FALSE) {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n param = c(\"Sensitivity\")\n par(mfcol = c(5, 2))\n no_chains = length(chain)\n iter_chain = round((iter.num * nb_chains - \n (burn_in) * nb_chains)/Thin, 0)/no_chains\n longueur = 1:iter_chain\n for (i in 1:rs.length) {\n plot(x = longueur, y = refstd_Parameters[longueur, \n i, 1], type = \"n\", col = 1, ylab = paste(param, \n \" of reference standard \", i), \n xlab = \"iteration number\", main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n for (l in 1:length(chain)) {\n lines(x = longueur, y = refstd_Parameters[longueur + \n (iter_chain * (l - 1)), i, 1], \n col = l)\n }\n }\n }\n else {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n param = c(\"Sensitivity\")\n par(mfcol = c(5, 2))\n longueur = 1:long\n for (i in 1:rs.length) {\n plot(x = longueur, y = refstd_Parameters[, \n i, 1], type = \"l\", col = \"grey\", \n ylab = paste(param, \" of reference standard \", \n i), xlab = \"iteration number\", \n main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n }\n }\n dev.off()\n file.png_RS2 = paste(\"RefStd density plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS2)#, paper = \"a4\", height = 20)\n param = c(\"Sensitivity\")\n par(mfcol = c(5, 2))\n longueur = 1:long\n for (i in 1:rs.length) {\n plot(density(refstd_Parameters[, i, \n 1]), lwd = 4, type = \"l\", col = \"grey\", \n main = paste(param, \" of reference standard \", \n i, \" \\n Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n }\n dev.off()\n }\n }\n else {\n refstd_parameters = matrix(0, ncol = 3, \n nrow = 1)\n rownames(refstd_parameters) = c(\"S2\")\n colnames(refstd_parameters) = 
c(paste(point_estimate, \n \"estimate\"), \"HPD.low\", \"HPD.high\")\n refstd_parameters[1, 1] <- S2.est\n refstd_parameters[1, 2] <- S2.HPD[1]\n refstd_parameters[1, 3] <- S2.HPD[2]\n long = length(THETA)\n refstd_Parameters = matrix(0, nrow = long, \n ncol = 1)\n colnames(refstd_Parameters) = c(\"S2\")\n refstd_Parameters[, 1] <- S2\n if (print_plot == TRUE) {\n if (is.null(chain) == FALSE) {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n param = c(\"Sensitivity of reference standard\")\n par(mfcol = c(5, 2))\n no_chains = length(chain)\n iter_chain = round((iter.num * nb_chains - \n (burn_in) * nb_chains)/Thin, 0)/no_chains\n longueur = 1:iter_chain\n plot(x = longueur, y = refstd_Parameters[longueur, \n 1], type = \"n\", col = 1, ylab = paste(param), \n xlab = \"iteration number\", main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n for (l in 1:length(chain)) {\n lines(x = longueur, y = refstd_Parameters[longueur + \n (iter_chain * (l - 1)), 1], col = l)\n }\n }\n else {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n param = c(\"Sensitivity of reference standard\")\n par(mfcol = c(5, 2))\n longueur = 1:long\n plot(x = longueur, y = refstd_Parameters[, \n 1], type = \"l\", col = \"grey\", ylab = paste(param), \n xlab = \"iteration number\", main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n }\n dev.off()\n file.png_RS2 = paste(\"RefStd density plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS2)#, paper = \"a4\", height = 20)\n param = c(\"Sensitivity of reference standard\")\n par(mfcol = c(5, 2))\n longueur = 1:long\n plot(density(refstd_Parameters[, 1]), \n lwd = 4, type = \"l\", col = \"grey\", \n main = paste(param, \" \\n Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n dev.off()\n }\n }\n }\n else {\n if (rs.length != 1) {\n refstd_parameters = array(0, dim = c(rs.length, \n 3, 2), dimnames = list(1:rs.length, c(paste(point_estimate, \n \"estimate\"), \"HPD lower\", \"HPD upper\"), \n c(\"S2\", \"C2\")))\n refstd_parameters[, 1, 1] <- S2.est\n refstd_parameters[, 2, 1] <- S2.HPD[, 1]\n refstd_parameters[, 3, 1] <- S2.HPD[, 2]\n refstd_parameters[, 1, 2] <- C2.est\n refstd_parameters[, 2, 2] <- C2.HPD[, 1]\n refstd_parameters[, 3, 2] <- C2.HPD[, 2]\n long = length(alpha[, 1])\n refstd_Parameters = array(0, c(long, rs.length, \n 2))\n refstd_Parameters[, , 1] <- S2\n refstd_Parameters[, , 2] <- C2\n if (print_plot == TRUE) {\n if (is.null(chain) == FALSE) {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n param = c(\"Sensitivity\", \"Specificity\")\n par(mfcol = c(5, 2))\n no_chains = length(chain)\n iter_chain = round((iter.num * nb_chains - \n (burn_in) * nb_chains)/Thin, 0)/no_chains\n longueur = 1:iter_chain\n for (j in 1:2) {\n for (i in 1:rs.length) {\n plot(x = longueur, y = refstd_Parameters[longueur, \n i, j], type = 
\"n\", col = 1, ylab = paste(param[j], \n \" of reference standard \", i), \n xlab = \"iteration number\", main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n for (l in 1:length(chain)) {\n lines(x = longueur, y = refstd_Parameters[longueur + \n (iter_chain * (l - 1)), i, \n j], col = l)\n }\n }\n }\n }\n else {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n param = c(\"Sensitivity\", \"Specificity\")\n par(mfcol = c(5, 2))\n longueur = 1:long\n for (j in 1:2) {\n for (i in 1:rs.length) {\n plot(x = longueur, y = refstd_Parameters[, \n i, j], type = \"l\", col = \"grey\", \n ylab = paste(param[j], \" of reference standard \", \n i), xlab = \"iteration number\", \n main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n }\n }\n }\n dev.off()\n file.png_RS2 = paste(\"RefStd density plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS2)#, paper = \"a4\", height = 20)\n param = c(\"Sensitivity\", \"Specificity\")\n par(mfcol = c(5, 2))\n longueur = 1:long\n for (j in 1:2) {\n for (i in 1:rs.length) {\n plot(density(refstd_Parameters[, \n i, j]), lwd = 4, type = \"l\", col = \"grey\", \n main = paste(param[j], \" of reference standard \", \n i, \" \\n Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n }\n }\n dev.off()\n }\n }\n else {\n refstd_parameters = matrix(0, ncol = 3, \n nrow = 2)\n rownames(refstd_parameters) = c(\"S2\", \"C2\")\n colnames(refstd_parameters) = c(paste(point_estimate, \n \"estimate\"), \"HPD.low\", \"HPD.high\")\n refstd_parameters[1, 1] <- S2.est\n refstd_parameters[1, 2] <- S2.HPD[1]\n refstd_parameters[1, 3] <- S2.HPD[2]\n refstd_parameters[2, 1] <- C2.est\n refstd_parameters[2, 2] <- C2.HPD[1]\n refstd_parameters[2, 3] <- C2.HPD[2]\n long = length(THETA)\n refstd_Parameters = matrix(0, nrow = long, \n ncol = 2)\n colnames(refstd_Parameters) = c(\"S2\", \"C2\")\n refstd_Parameters[, 1] <- S2\n refstd_Parameters[, 2] <- C2\n if (print_plot == TRUE) {\n if (is.null(chain) == FALSE) {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n param = c(\"Sensitivity of reference standard\", \n \"Specificity of reference standard\")\n par(mfcol = c(5, 2))\n no_chains = length(chain)\n iter_chain = round((iter.num * nb_chains - \n (burn_in) * nb_chains)/Thin, 0)/no_chains\n longueur = 1:iter_chain\n for (j in 1:2) {\n plot(x = longueur, y = refstd_Parameters[longueur, \n j], type = \"n\", col = 1, ylab = paste(param[j]), \n xlab = \"iteration number\", main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n for (l in 1:length(chain)) {\n lines(x = longueur, y = refstd_Parameters[longueur + \n (iter_chain * (l - 1)), j], col = l)\n }\n }\n }\n else {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n param = c(\"Sensitivity of reference 
standard\", \n \"Specificity of reference standard\")\n par(mfcol = c(5, 2))\n longueur = 1:long\n for (j in 1:2) {\n plot(x = longueur, y = refstd_Parameters[, \n j], type = \"l\", col = \"grey\", ylab = paste(param[j]), \n xlab = \"iteration number\", main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n }\n }\n dev.off()\n file.png_RS2 = paste(\"RefStd density plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS2)#, paper = \"a4\", height = 20)\n param = c(\"Sensitivity of reference standard\", \n \"Specificity of reference standard\")\n par(mfcol = c(5, 2))\n for (j in 1:2) {\n longueur = 1:long\n plot(density(refstd_Parameters[, j]), \n lwd = 4, type = \"l\", col = \"grey\", \n main = paste(param[j], \" \\n Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n }\n dev.off()\n }\n }\n }\n }\n }\n else {\n if (condInd == TRUE & Gold_Std == FALSE & model == \n 2) {\n if (rs.length != 1) {\n refstd_parameters = array(0, dim = c(rs.length, \n 3, 4), dimnames = list(1:rs.length, c(paste(point_estimate, \n \"estimate\"), \"HPD lower\", \"HPD upper\"), \n c(\"S2\", \"C2\", \"a1\", \"a0\")))\n refstd_parameters[, 1, 1] <- S2.est\n refstd_parameters[, 2, 1] <- S2.HPD[, 1]\n refstd_parameters[, 3, 1] <- S2.HPD[, 2]\n refstd_parameters[, 1, 2] <- C2.est\n refstd_parameters[, 2, 2] <- C2.HPD[, 1]\n refstd_parameters[, 3, 2] <- C2.HPD[, 2]\n refstd_parameters[, 1, 3] <- a1.est\n refstd_parameters[, 2, 3] <- a1.HPD[, 1]\n refstd_parameters[, 3, 3] <- a1.HPD[, 2]\n refstd_parameters[, 1, 4] <- a0.est\n refstd_parameters[, 2, 4] <- a0.HPD[, 1]\n refstd_parameters[, 3, 4] <- a0.HPD[, 2]\n long = length(alpha[, 1])\n refstd_Parameters = array(0, dim = c(long, \n rs.length, 4))\n refstd_Parameters[, , 1] <- S2\n refstd_Parameters[, , 2] <- C2\n refstd_Parameters[, , 3] <- a1\n refstd_Parameters[, , 4] <- a0\n if (print_plot == TRUE) {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS)#, paper = \"a4\", height = 20)\n param = c(\"Sensitivity\", \"Specificity\", \n \"a1\", \"a0\")\n par(mfcol = c(5, 2))\n for (j in 1:4) {\n longueur = 1:long\n for (i in 1:rs.length) {\n plot(x = longueur, y = refstd_Parameters[, \n i, j], type = \"l\", col = \"grey\", \n ylab = paste(param[j], \" of reference standard \", \n i), xlab = \"iteration number\", \n main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n abline(a = refstd_parameters[i, 2, \n j], b = 0, col = \"green\", lwd = 3)\n abline(a = refstd_parameters[i, 3, \n j], b = 0, col = \"green\", lwd = 3)\n }\n }\n dev.off()\n file.png_RS2 = paste(\"RefStd density plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS2)#, paper = \"a4\", height = 20)\n param = c(\"Sensitivity\", \"Specificity\", \n \"a1\", \"a0\")\n par(mfcol = c(5, 2))\n longueur = 1:long\n for (j in 1:4) {\n for (i in 1:rs.length) {\n plot(density(refstd_Parameters[, i, \n j]), lwd = 4, type = \"l\", col = \"grey\", \n main = paste(param[j], \" of reference standard \", \n i, \" \\n Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n 
nb_chains)/Thin))\n }\n }\n dev.off()\n }\n }\n else {\n refstd_parameters = matrix(0, ncol = 3, nrow = 4)\n rownames(refstd_parameters) = c(\"S2\", \"C2\", \n \"a1\", \"a0\")\n colnames(refstd_parameters) = c(paste(point_estimate, \n \"estimate\"), \"HPD.low\", \"HPD.high\")\n refstd_parameters[1, 1] <- S2.est\n refstd_parameters[1, 2] <- S2.HPD[1]\n refstd_parameters[1, 3] <- S2.HPD[2]\n refstd_parameters[2, 1] <- C2.est\n refstd_parameters[2, 2] <- C2.HPD[1]\n refstd_parameters[2, 3] <- C2.HPD[2]\n refstd_parameters[3, 1] <- a1.est\n refstd_parameters[3, 2] <- a1.HPD[1]\n refstd_parameters[3, 3] <- a1.HPD[2]\n refstd_parameters[4, 1] <- a0.est\n refstd_parameters[4, 2] <- a0.HPD[1]\n refstd_parameters[4, 3] <- a0.HPD[2]\n long = length(THETA)\n refstd_Parameters = matrix(0, nrow = long, \n ncol = 4)\n colnames(refstd_Parameters) = c(\"S2\", \"C2\", \n \"a1\", \"a0\")\n refstd_Parameters[, 1] <- S2\n refstd_Parameters[, 2] <- C2\n refstd_Parameters[, 3] <- a1\n refstd_Parameters[, 4] <- a0\n if (print_plot == TRUE) {\n file.png_RS = paste(\"RefStd trace plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS, pointsize=18)#, paper = \"a4\", height = 20)\n Param = c(\"Sensitivity of reference standard\", \n \"Specificity of reference standard\", \n \"a1\", \"a0\")\n par(mfcol = c(5, 2))\n for (j in 1:4) {\n longueur = 1:long\n plot(x = longueur, y = refstd_Parameters[, \n j], type = \"l\", col = \"grey\", ylab = paste(Param[j]), \n xlab = \"iteration number\", main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin))\n abline(a = refstd_parameters[j, 2], b = 0, \n col = \"green\", lwd = 3)\n abline(a = refstd_parameters[j, 3], b = 0, \n col = \"green\", lwd = 3)\n }\n dev.off()\n file.png_RS2 = paste(\"Density plots for N =\", \n round((iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin, 0), \".png\")\n png(file.png_RS2)#, paper = \"a4\", height = 20)\n param = c(\"Sensitivity\", \"Specificity\", \n \"a1\", \"a0\")\n par(mfcol = c(5, 2))\n for (j in 1:4) {\n longueur = 1:long\n plot(density(refstd_Parameters[, j]), \n lwd = 4, type = \"l\", col = \"grey\", \n main = paste(param[j], \" of reference standard \", \n j, \" \\n Thinning interval = \", thin.interval, \n \"\\n Total samplesize kept = \", (iter.num * \n nb_chains - (burn_in) * nb_chains)/Thin))\n }\n dev.off()\n }\n }\n }\n }\n parameters = array(0, dim = c(N, 3, 5), dimnames = list(Num_study, \n c(paste(point_estimate, \"estimate\"), \"HPD lower\", \n \"HPD upper\"), c(\"theta\", \"alpha\", \"pi\", \"S1\", \n \"C1\")))\n parameters[, 1, 1] <- theta.est\n parameters[, 2, 1] <- theta.HPD[, 1]\n parameters[, 3, 1] <- theta.HPD[, 2]\n parameters[, 1, 2] <- alpha.est\n parameters[, 2, 2] <- alpha.HPD[, 1]\n parameters[, 3, 2] <- alpha.HPD[, 2]\n parameters[, 1, 3] <- PI.est\n parameters[, 2, 3] <- PI.HPD[, 1]\n parameters[, 3, 3] <- PI.HPD[, 2]\n parameters[, 1, 4] <- S1.est\n parameters[, 2, 4] <- S1.HPD[, 1]\n parameters[, 3, 4] <- S1.HPD[, 2]\n parameters[, 1, 5] <- C1.est\n parameters[, 2, 5] <- C1.HPD[, 1]\n parameters[, 3, 5] <- C1.HPD[, 2]\n long = length(alpha[, 1])\n Parameters = array(0, c(long, N, 5))\n Parameters[, , 1] <- theta\n Parameters[, , 2] <- alpha\n Parameters[, , 3] <- PI\n Parameters[, , 4] <- S1\n Parameters[, , 5] <- C1\n parameter = matrix(0, ncol = 3, nrow = 9)\n rownames(parameter) = c(\"THETA\", \"LAMBDA\", \"beta\", \n \"sigma.alpha\", \"sigma.theta\", 
\"Sensitivity (summary)\", \"Specificity (summary)\", \n \"Sensitivity (new)\", \"Specificity (new)\")\n colnames(parameter) = c(paste(point_estimate, \"estimate\"), \n \"HPD.low\", \"HPD.high\")\n parameter[1, 1] <- THETA.est\n parameter[1, 2] <- THETA.HPD[1]\n parameter[1, 3] <- THETA.HPD[2]\n parameter[2, 1] <- LAMBDA.est\n parameter[2, 2] <- LAMBDA.HPD[1]\n parameter[2, 3] <- LAMBDA.HPD[2]\n parameter[3, 1] <- beta.est\n parameter[3, 2] <- beta.HPD[1]\n parameter[3, 3] <- beta.HPD[2]\n parameter[4, 1] <- sigma.alpha.est\n parameter[4, 2] <- sigma.alpha.HPD[1]\n parameter[4, 3] <- sigma.alpha.HPD[2]\n parameter[5, 1] <- sigma.theta.est\n parameter[5, 2] <- sigma.theta.HPD[1]\n parameter[5, 3] <- sigma.theta.HPD[2]\n parameter[6, 1] <- S_overall.est\n parameter[6, 2] <- S_overall.HPD[1]\n parameter[6, 3] <- S_overall.HPD[2]\n parameter[7, 1] <- C_overall.est\n parameter[7, 2] <- C_overall.HPD[1]\n parameter[7, 3] <- C_overall.HPD[2]\n parameter[8, 1] <- S1_new.est\n parameter[8, 2] <- S1_new.HPD[1]\n parameter[8, 3] <- S1_new.HPD[2]\n parameter[9, 1] <- C1_new.est\n parameter[9, 2] <- C1_new.HPD[1]\n parameter[8, 3] <- C1_new.HPD[2]\n long = length(THETA)\n Parameter = matrix(0, nrow = long, ncol = 9)\n colnames(Parameter) = c(\"THETA\", \"LAMBDA\", \"beta\", \n \"sigma.alpha\", \"sigma.theta\", \"Sensitivity (summary)\", \"Specificity (summary)\", \n \"Sensitivity (new)\", \"Specificity (new)\")\n Parameter[, 1] <- THETA\n Parameter[, 2] <- LAMBDA\n Parameter[, 3] <- beta\n Parameter[, 4] <- sigma.alpha\n Parameter[, 5] <- sigma.theta\n Parameter[, 6] <- S_overall\n Parameter[, 7] <- C_overall\n Parameter[, 8] <- S1_new\n Parameter[, 9] <- C1_new\n if (print_plot == TRUE) {\n if (is.null(chain) == FALSE) {\n file.png5 = paste(\"Trace plots for N =\", round((iter.num * \n nb_chains - (burn_in) * nb_chains)/Thin, \n 0), \".png\")\n png(file.png5, width=1440, height=1440, pointsize=18)#, paper = \"a4\", height = 20)\n param = c(\"theta\", \"alpha\", \"PI\", \"S1\", \"C1\")\n Param = c(\"Capital Theta\", \"Capital Lambda\", \n \"beta\", \"~sigma[alpha]\", \"~sigma[theta]\", \n \"Sensitivity (summary)\", \"Specificty (summary)\", \"Sensitivity (new)\", \"Specificity (new)\")\n no_chains = length(chain)\n iter_chain = round((iter.num * nb_chains - \n (burn_in) * nb_chains)/Thin, 0)/no_chains\n min_param = c(min(Parameters[, , 1]), min(Parameters[, \n , 2]), min(Parameters[, , 3]), min(Parameters[, \n , 4]), min(Parameters[, , 5]))\n max_param = c(max(Parameters[, , 1]), max(Parameters[, \n , 2]), max(Parameters[, , 3]), max(Parameters[, \n , 4]), max(Parameters[, , 5]))\n dlag = (max_param - min_param)/100\n range_param = numeric()\n for (j in 1:5) {\n range_param = cbind(range_param, seq(min_param[j] + \n dlag[j]/2, max_param[j] - dlag[j]/2, by = dlag[j]))\n }\n par(mfcol = c(5, 2))\n longueur = 1:iter_chain\n for (j in 1:5) {\n for (i in 1:N) {\n plot(x = longueur, y = Parameters[longueur, \n i, j], type = \"n\", col = 1, ylab = paste(param[j], \n \" of study \", i), xlab = \"iteration number\", \n main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin), ylim = range(range_param[, \n j]))\n for (l in 1:length(chain)) {\n lines(x = longueur, y = Parameters[longueur + \n (iter_chain * (l - 1)), i, j], col = l)\n }\n }\n }\n min_Param = c(min(Parameter[, 1]), min(Parameter[, \n 2]), min(Parameter[, 3]), min(Parameter[, \n 4]), min(Parameter[, 5]), min(Parameter[, \n 6]), min(Parameter[, 7]), 
min(Parameter[, \n 8]), min(Parameter[, 9]))\n max_Param = c(max(Parameter[, 1]), max(Parameter[, \n 2]), max(Parameter[, 3]), max(Parameter[, \n 4]), max(Parameter[, 5]), max(Parameter[, \n 6]), max(Parameter[, 7]), max(Parameter[, \n 8]), max(Parameter[, 9]))\n dlag = (max_Param - min_Param)/100\n range_Param = numeric()\n for (j in 1:9) {\n range_Param = cbind(range_Param, seq(min_Param[j] + \n dlag[j]/2, max_Param[j] - dlag[j]/2, by = dlag[j]))\n }\n for (j in 1:9) {\n plot(x = longueur, y = Parameter[longueur, \n j], type = \"n\", col = 1, ylab = paste(Param[j]), \n xlab = \"iteration number\", main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * nb_chains)/Thin), \n ylim = range(range_Param[, j]))\n for (l in 1:length(chain)) {\n lines(x = longueur, y = Parameter[longueur + \n (iter_chain * (l - 1)), j], col = l)\n }\n }\n dev.off()\n }\n else {\n file.png2 = paste(\"Trace plots for N =\", round((iter.num * \n nb_chains - (burn_in) * nb_chains)/Thin, \n 0), \".png\")\n png(file.png2, pointsize=18, width=1440, height=1440)#, paper = \"a4\", height = 20)\n param = c(\"theta\", \"alpha\", \"PI\", \"S1\", \"C1\")\n Param = c(\"Capital Theta\", \"Capital Lambda\", \n \"beta\", \"~sigma[alpha]\", \"~sigma[theta]\", \n \"S Overall\", \"C Overall\", \"S1_new\", \"C1_new\")\n min_param = c(min(Parameters[, , 1]), min(Parameters[, \n , 2]), min(Parameters[, , 3]), min(Parameters[, \n , 4]), min(Parameters[, , 5]), min(Parameters[, \n , 6]), min(Parameters[, , 7]))\n max_param = c(max(Parameters[, , 1]), max(Parameters[, \n , 2]), max(Parameters[, , 3]), max(Parameters[, \n , 4]), max(Parameters[, , 5]), max(Parameters[, \n , 6]), max(Parameters[, , 7]))\n dlag = (max_param - min_param)/100\n range_param = numeric()\n for (j in 1:5) {\n range_param = cbind(range_param, seq(min_param[j] + \n dlag[j]/2, max_param[j] - dlag[j]/2, by = dlag[j]))\n }\n par(mfcol = c(5, 2))\n longueur = 1:long\n for (j in 1:5) {\n for (i in 1:N) {\n plot(x = longueur, y = Parameters[, i, \n j], type = \"l\", col = \"grey\", ylab = paste(param[j], \n \" of study \", i), xlab = \"iteration number\", \n main = paste(\"Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * \n nb_chains)/Thin), ylim = range(range_param[, \n j]))\n abline(a = parameters[i, 2, j], b = 0, \n col = \"green\", lwd = 3)\n abline(a = parameters[i, 3, j], b = 0, \n col = \"green\", lwd = 3)\n }\n }\n min_Param = c(min(Parameter[, 1]), min(Parameter[, \n 2]), min(Parameter[, 3]), min(Parameter[, \n 4]), min(Parameter[, 5]), min(Parameter[, \n 6]), min(Parameter[, 7]), min(Parameter[, \n 8]), min(Parameter[, 9]))\n max_Param = c(max(Parameter[, 1]), max(Parameter[, \n 2]), max(Parameter[, 3]), max(Parameter[, \n 4]), max(Parameter[, 5]), max(Parameter[, \n 6]), max(Parameter[, 7]), max(Parameter[, \n 8]), max(Parameter[, 9]))\n dlag = (max_Param - min_Param)/100\n range_Param = numeric()\n for (j in 1:9) {\n range_Param = cbind(range_Param, seq(min_Param[j] + \n dlag[j]/2, max_Param[j] - dlag[j]/2, by = dlag[j]))\n }\n for (j in 1:9) {\n plot(x = longueur, y = Parameter[, j], type = \"l\", \n col = \"grey\", ylab = paste(Param[j]), xlab = \"iteration number\", \n main = paste(\"Thinning interval = \", thin.interval, \n \"\\n Total samplesize kept = \", (iter.num * \n nb_chains - (burn_in) * nb_chains)/Thin), \n ylim = range(range_Param[, j]))\n abline(a = parameter[j, 2], b = 0, col = \"green\", \n lwd = 3)\n 
abline(a = parameter[j, 3], b = 0, col = \"green\", \n lwd = 3)\n }\n dev.off()\n }\n\n\n density.plot.name <- \"Density_plots_for_N_\"\n\n # build density pdfs\n file.pdf3 = paste(density.plot.name, round((iter.num * \n nb_chains - (burn_in) * nb_chains)/Thin, 0), \n \".pdf\", sep=\"\")\n pdf(file.pdf3, paper = \"a4\", height = 20)\n make.density.plot(Parameters, Parameter, long, iter.num, \n nb_chains, burn_in, no_chains, Thin, thin.interval, N)\n dev.off()\n\n #file.png3 = paste(density.plot.name, round((iter.num * \n # nb_chains - (burn_in) * nb_chains)/Thin, 0), \n # \".png\", sep=\"\")\n #png(file.png3, width=1440, heigh=1440, pointsize=20)#, paper = \"a4\", height = 20)\n \n # for the kernel plots, we only generate PNGs for overalls\n density.pngs <- make.density.plot(Parameters, Parameter, long, iter.num, \n nb_chains, burn_in, no_chains, Thin, thin.interval, N,\n make.pngs=TRUE, base.file.name=density.plot.name)\n\n image.list <- append(image.list, density.pngs)\n\n\n \n #image.list$density_plots <- c(file.png3, file.pdf3)\n\n # possibly the worse variable naming of all time.\n png(\"Summary_ROC_curve.png\", width=1440, heigh=1440, pointsize=20)\n make.sroc.plot(Parameters, Parameter, data)\n dev.off()\n \n\n pdf(\"Summary_ROC_curve.pdf\")\n make.sroc.plot(Parameters, Parameter, data)\n dev.off()\n image.list[[\"Summary ROC\"]] <- \"Summary_ROC_curve.png\"#c(\"Summary_ROC_curve.png\", \"Summary_ROC_curve.pdf\")\n\n }\n }\n }\n if (Gold_Std == FALSE) {\n Results = list(parameter, parameters, refstd_parameters, \n paste(\"See '\", getwd(), \"' for complete results\", \n sep = \"\"))\n names(Results) = c(\"Between-study parameters\", \"Within-study parameters\", \n \"Reference standard\", \"\")\n }\n else {\n Results = list(parameter, parameters, paste(\"See '\", \n getwd(), \"' for complete results\", sep = \"\"))\n names(Results) = c(\"Between-study parameters\", \"Within-study parameters\", \n \"\")\n }\n\n Results$image.list <- image.list\n return(Results)\n cat(paste(\"See \\\"\", getwd(), \"\\\" for complete results\", sep = \"\"))\n}\n\n\nmake.sens.spec.density.plots <- function(refstd_Parameters, long, iter.num, \n nb_chains, burn_in, no_chains, Thin, thin.interval){\n param = c(\"Sensitivity\", \"Specificity\", \n \"a1\", \"a0\")\n par(mfcol = c(5, 2))\n for (j in 1:4) {\n longueur = 1:long\n plot(density(refstd_Parameters[, j]), \n lwd = 4, type = \"l\", col = \"grey\", \n main = paste(param[j], \" of reference standard \", \n j, \" \\n Thinning interval = \", thin.interval, \n \"\\n Total samplesize kept = \", (iter.num * \n nb_chains - (burn_in) * nb_chains)/Thin))\n }\n}\n\nmake.density.plot <- function(Parameters, Parameter, long, \n iter.num, nb_chains, burn_in, no_chains, Thin, thin.interval, N,\n make.pngs=FALSE, base.file.name=NULL){\n param = c(\"theta\", \"alpha\", \"PI\", \"Sensitivity\", \"Specificity\")\n Param = c(\"Capital Theta\", \"Capital Lambda\", \n \"beta\", \"~sigma[alpha]\", \"~sigma[theta]\", \"Sensitivity (summary)\", \n \"Specificity (summary)\", \"Sensitivity (new)\", \"Specificity (new)\")\n par(mfcol = c(5, 2))\n longueur = 1:long\n\n if (!make.pngs){\n for (j in 1:5) {\n for (i in 1:N) {\n # we're not making the PNGs, so just write to\n # the open device (assumed to be a PDF)\n plot(density(Parameters[, i, j]), lwd = 4, \n type = \"l\", col = \"grey\", main = paste(param[j], \n \" of study \", i, \" \\n Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * nb_chains)/Thin))\n }\n }\n 
}\n\n # we'll return these\n image.list <- c() \n image.names <- c()\n\n # these, I believe, are the overall stats.\n for (j in 1:9) {\n if (make.pngs){\n file.png = paste(base.file.name, round((iter.num * \n nb_chains - (burn_in) * nb_chains)/Thin, 0), \"_\", Param[j],\n \".png\", sep=\"\")\n # open a device to write to.\n png(file.png, width=480, heigh=480)#, pointsize=20)\n \n image.list <- c(image.list, file.png)\n image.names <- c(image.names, Param[j])\n }\n \n # no generate the density plot.\n plot(density(Parameter[, j]), lwd = 4, type = \"l\", \n col = \"grey\", main = paste(Param[j], \" \\n Thinning interval = \", \n thin.interval, \"\\n Total samplesize kept = \", \n (iter.num * nb_chains - (burn_in) * nb_chains)/Thin))\n \n if (make.pngs){\n # kill the current PNG\n dev.off()\n }\n }\n\n if (make.pngs){\n image.list <- as.list(image.list)\n names(image.list) <- image.names\n image.list\n }\n\n}\n\n\nmake.sroc.plot <- function(Parameters, Parameter, data){\n default.x = range(1, 0)\n default.y = range(0, 1)\n plot(x = default.x, y = default.y, type = \"n\", xlim = rev(range(default.x)), \n xlab = \"\", ylab = \"\")\n title(xlab = \"Specificity\", ylab = \"Sensitivity\", \n cex.lab = 1.5, main = \"Summary ROC curve\")\n Sensi1 = apply(as.matrix(Parameters[, , 4]), 2, median)\n Speci1 = apply(as.matrix(Parameters[, , 5]), 2, median)\n Scale_factor = 10\n symbols(Speci1, Sensi1, circles = rowSums(as.matrix(data[[1]])), \n inches = 0.1 * Scale_factor/7, add = TRUE)\n Ov_Se = 1 - pnorm((median(Parameter[, 1]) - median(Parameter[, \n 2])/2)/exp(median(Parameter[, 3])/2))\n Ov_Sp = pnorm((median(Parameter[, 1]) + median(Parameter[, \n 2])/2)/exp(-median(Parameter[, 3])/2))\n points(Ov_Sp, Ov_Se, pch = 19, cex = 2)\n thet = qnorm((1 - as.matrix(Parameters[, , 4])) + \n 1e-14) * exp(Parameter[, 3]/2) + Parameter[, \n 2]/2\n min_TH = quantile(thet, 0.025)\n max_TH = quantile(thet, 0.975)\n dTH = 5e-05\n TH_range = seq(min_TH + dTH/2, max_TH - dTH/2, dTH)\n S_sroc = 1 - pnorm((TH_range - median(Parameter[, \n 2])/2)/exp(median(Parameter[, 3])/2))\n C_sroc = pnorm((TH_range + median(Parameter[, 2])/2)/exp(-median(Parameter[, \n 3])/2))\n lines(C_sroc, S_sroc, lwd = 3, col = \"black\", lty = 1)\n}\n\n" }, { "alpha_fraction": 0.2589641511440277, "alphanum_fraction": 0.26693227887153625, "avg_line_length": 11.94444465637207, "blob_id": "aa738e4ecccf63f1bb631fbef38e4cd63da04a73", "content_id": "b5c195c76d89d41e92625b13678f8423ee90fd2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 251, "license_type": "no_license", "max_line_length": 22, "num_lines": 18, "path": "/src/R/HSROC/R/f.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "f <-\r\nfunction (a, borne) \r\n{\r\n low = borne[1]\r\n up = borne[2]\r\n if (a > up) {\r\n b = up\r\n }\r\n else {\r\n if (a < low) {\r\n b = low\r\n }\r\n else {\r\n b = a\r\n }\r\n }\r\n return(b)\r\n}\r\n" }, { "alpha_fraction": 0.6718041896820068, "alphanum_fraction": 0.675883948802948, "avg_line_length": 70.19355010986328, "blob_id": "599c6f93aaa7e63733f857968a740238c2b0602e", "content_id": "aab17e50cd8fe0fbf806ece3e08df416dbc6a917", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 2206, "license_type": "no_license", "max_line_length": 419, "num_lines": 31, "path": "/doc/fixed_vs_random.html", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "<html>\n<head>\n<link href=\"css/help.css\" 
\ntype=text/css rel=stylesheet>\n</head>\n<body>\n<h2><a name=\"top\">Fixed-Effect and Random-Effects Models</a></h2>\n    <p>There are two basic types of models used in meta-analysis: fixed-effect models and random-effects models:</p>\n    <ul>\n    <li>Fixed-effect model - Assume that all studies in the meta-analysis share a common (true) effect size. The only source of uncertainty is the within-study (sampling) error.\n    The fixed-effect model can be written as<br>\n    <eq><img src=\"images/fixed_effect_model.png\"></eq><br>\n    where y<sub><em>i</em></sub> is the observed effect in study <em>i</em>, &theta; is the true effect size, and <em>v<sub>i</sub></em> is the within-study variance (or sampling error) in study <em>i</em>. \n    </li>\n    <li>Random-effects model - Assume that each study has its own true effect size, which may differ between studies. Sources of uncertainty include both within-study errors and between-study variance - the variance of the true effect sizes. The between-study variance is also known as the <em>heterogeneity</em> of the model.<br><br>\n    The random-effects model can be written as<br>\n    <eq><img src=\"images/random_effects_model.png\"></eq><br> \n    where\n    <ul>\n    <li>&mu; is the grand (overall) mean</li>\n    <li>&alpha;<sub><em>i</em></sub> is the between-study deviation - the difference between the true effect in study <em>i</em> and the grand mean; the variance of these deviations is the between-study variance</li>\n    <li><em>v<sub>i</sub></em> is the within-study variance in study <em>i</em>.</li>\n    </ul>\n</ul>\n\n    <p>The next two sections describe methods for estimating the summary effect sizes in both the fixed and random-effects cases.</p>\n<p><a href=\"#top\">Back to top</a></p>\n<br>\n    <table id=\"nav\" cellspacing=\"0\"><tr valign=\"center\"><td align=\"left\" width=\"20\"><a href=\"diagnostic_analysis.html#top\"><img src=\"images/b_prev.gif\" border=\"0\"></td><td align=\"left\">Example - Diagnostic Data</td></a><td>&nbsp;</td><td align=\"right\">Example - Fixed-Effect Method</td><td align=\"right\" width=\"20\"><a href=\"fixed_effect_example.html#top\"><img src=\"images/b_next.gif\" border=\"0\"></td></a></td></tr></table>\n</body>\n</html>" }, { "alpha_fraction": 0.6365264058113098, "alphanum_fraction": 0.6702476739883423, "avg_line_length": 57.78947448730469, "blob_id": "dfae445ae8368a7b7b20791ba137c16da241e12d", "content_id": "3bd44aff0617db910d142a481942581c586453fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 13353, "license_type": "no_license", "max_line_length": 132, "num_lines": 226, "path": "/src/forms/ui_diagnostic_data_form.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'diagnostic_data_form.ui'\n#\n# Created: Mon May 20 09:43:57 2013\n# by: PyQt4 UI code generator 4.10\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_DiagnosticDataForm(object):\n def setupUi(self, DiagnosticDataForm):\n DiagnosticDataForm.setObjectName(_fromUtf8(\"DiagnosticDataForm\"))\n DiagnosticDataForm.resize(380, 412)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(_fromUtf8(\":/images/meta.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n DiagnosticDataForm.setWindowIcon(icon)\n self.verticalLayout = QtGui.QVBoxLayout(DiagnosticDataForm)\n self.verticalLayout.setObjectName(_fromUtf8(\"verticalLayout\"))\n self.two_by_two_table = QtGui.QTableWidget(DiagnosticDataForm)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.two_by_two_table.sizePolicy().hasHeightForWidth())\n self.two_by_two_table.setSizePolicy(sizePolicy)\n self.two_by_two_table.setMinimumSize(QtCore.QSize(356, 111))\n self.two_by_two_table.setMaximumSize(QtCore.QSize(356, 111))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.two_by_two_table.setFont(font)\n self.two_by_two_table.setFrameShape(QtGui.QFrame.NoFrame)\n self.two_by_two_table.setFrameShadow(QtGui.QFrame.Plain)\n self.two_by_two_table.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.two_by_two_table.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.two_by_two_table.setAlternatingRowColors(True)\n self.two_by_two_table.setGridStyle(QtCore.Qt.DashLine)\n self.two_by_two_table.setRowCount(3)\n self.two_by_two_table.setColumnCount(3)\n self.two_by_two_table.setObjectName(_fromUtf8(\"two_by_two_table\"))\n item = QtGui.QTableWidgetItem()\n self.two_by_two_table.setVerticalHeaderItem(0, item)\n item = QtGui.QTableWidgetItem()\n self.two_by_two_table.setVerticalHeaderItem(1, item)\n item = QtGui.QTableWidgetItem()\n self.two_by_two_table.setVerticalHeaderItem(2, item)\n item = QtGui.QTableWidgetItem()\n self.two_by_two_table.setHorizontalHeaderItem(0, item)\n item = QtGui.QTableWidgetItem()\n self.two_by_two_table.setHorizontalHeaderItem(1, item)\n item = QtGui.QTableWidgetItem()\n self.two_by_two_table.setHorizontalHeaderItem(2, item)\n self.verticalLayout.addWidget(self.two_by_two_table)\n spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)\n self.verticalLayout.addItem(spacerItem)\n self.horizontalLayout_6 = QtGui.QHBoxLayout()\n self.horizontalLayout_6.setObjectName(_fromUtf8(\"horizontalLayout_6\"))\n self.clear_Btn = QtGui.QPushButton(DiagnosticDataForm)\n self.clear_Btn.setObjectName(_fromUtf8(\"clear_Btn\"))\n self.horizontalLayout_6.addWidget(self.clear_Btn)\n spacerItem1 = 
QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.horizontalLayout_6.addItem(spacerItem1)\n self.back_calc_Btn = QtGui.QPushButton(DiagnosticDataForm)\n self.back_calc_Btn.setEnabled(False)\n self.back_calc_Btn.setObjectName(_fromUtf8(\"back_calc_Btn\"))\n self.horizontalLayout_6.addWidget(self.back_calc_Btn)\n self.verticalLayout.addLayout(self.horizontalLayout_6)\n self.horizontalLayout_3 = QtGui.QHBoxLayout()\n self.horizontalLayout_3.setObjectName(_fromUtf8(\"horizontalLayout_3\"))\n self.prevalence_lbl = QtGui.QLabel(DiagnosticDataForm)\n self.prevalence_lbl.setObjectName(_fromUtf8(\"prevalence_lbl\"))\n self.horizontalLayout_3.addWidget(self.prevalence_lbl)\n self.prevalence_txt_box = QtGui.QLineEdit(DiagnosticDataForm)\n self.prevalence_txt_box.setMinimumSize(QtCore.QSize(75, 0))\n self.prevalence_txt_box.setMaximumSize(QtCore.QSize(75, 16777215))\n self.prevalence_txt_box.setObjectName(_fromUtf8(\"prevalence_txt_box\"))\n self.horizontalLayout_3.addWidget(self.prevalence_txt_box)\n spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.horizontalLayout_3.addItem(spacerItem2)\n self.verticalLayout.addLayout(self.horizontalLayout_3)\n self.horizontalLayout_2 = QtGui.QHBoxLayout()\n self.horizontalLayout_2.setObjectName(_fromUtf8(\"horizontalLayout_2\"))\n self.label_13 = QtGui.QLabel(DiagnosticDataForm)\n self.label_13.setMinimumSize(QtCore.QSize(50, 0))\n self.label_13.setMaximumSize(QtCore.QSize(50, 20))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.label_13.setFont(font)\n self.label_13.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)\n self.label_13.setObjectName(_fromUtf8(\"label_13\"))\n self.horizontalLayout_2.addWidget(self.label_13)\n self.effect_cbo_box = QtGui.QComboBox(DiagnosticDataForm)\n self.effect_cbo_box.setMinimumSize(QtCore.QSize(100, 20))\n self.effect_cbo_box.setMaximumSize(QtCore.QSize(76, 20))\n self.effect_cbo_box.setObjectName(_fromUtf8(\"effect_cbo_box\"))\n self.horizontalLayout_2.addWidget(self.effect_cbo_box)\n spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.horizontalLayout_2.addItem(spacerItem3)\n self.verticalLayout.addLayout(self.horizontalLayout_2)\n self.groupBox = QtGui.QGroupBox(DiagnosticDataForm)\n self.groupBox.setTitle(_fromUtf8(\"\"))\n self.groupBox.setObjectName(_fromUtf8(\"groupBox\"))\n self.gridLayout = QtGui.QGridLayout(self.groupBox)\n self.gridLayout.setObjectName(_fromUtf8(\"gridLayout\"))\n spacerItem4 = QtGui.QSpacerItem(26, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.gridLayout.addItem(spacerItem4, 0, 0, 2, 1)\n self.ci_label = QtGui.QLabel(self.groupBox)\n self.ci_label.setObjectName(_fromUtf8(\"ci_label\"))\n self.gridLayout.addWidget(self.ci_label, 0, 2, 1, 1)\n spacerItem5 = QtGui.QSpacerItem(26, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.gridLayout.addItem(spacerItem5, 0, 3, 2, 1)\n self.horizontalLayout_7 = QtGui.QHBoxLayout()\n self.horizontalLayout_7.setObjectName(_fromUtf8(\"horizontalLayout_7\"))\n self.label_14 = QtGui.QLabel(self.groupBox)\n self.label_14.setMinimumSize(QtCore.QSize(0, 20))\n self.label_14.setMaximumSize(QtCore.QSize(16777215, 20))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n self.label_14.setFont(font)\n self.label_14.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft)\n self.label_14.setObjectName(_fromUtf8(\"label_14\"))\n 
self.horizontalLayout_7.addWidget(self.label_14)\n self.effect_txt_box = QtGui.QLineEdit(self.groupBox)\n self.effect_txt_box.setMinimumSize(QtCore.QSize(50, 20))\n self.effect_txt_box.setMaximumSize(QtCore.QSize(50, 20))\n self.effect_txt_box.setObjectName(_fromUtf8(\"effect_txt_box\"))\n self.horizontalLayout_7.addWidget(self.effect_txt_box)\n self.gridLayout.addLayout(self.horizontalLayout_7, 1, 1, 1, 1)\n self.horizontalLayout_5 = QtGui.QHBoxLayout()\n self.horizontalLayout_5.setObjectName(_fromUtf8(\"horizontalLayout_5\"))\n self.label = QtGui.QLabel(self.groupBox)\n self.label.setObjectName(_fromUtf8(\"label\"))\n self.horizontalLayout_5.addWidget(self.label)\n self.low_txt_box = QtGui.QLineEdit(self.groupBox)\n self.low_txt_box.setMinimumSize(QtCore.QSize(50, 20))\n self.low_txt_box.setMaximumSize(QtCore.QSize(50, 20))\n self.low_txt_box.setObjectName(_fromUtf8(\"low_txt_box\"))\n self.horizontalLayout_5.addWidget(self.low_txt_box)\n self.label_4 = QtGui.QLabel(self.groupBox)\n self.label_4.setObjectName(_fromUtf8(\"label_4\"))\n self.horizontalLayout_5.addWidget(self.label_4)\n self.high_txt_box = QtGui.QLineEdit(self.groupBox)\n self.high_txt_box.setMinimumSize(QtCore.QSize(50, 20))\n self.high_txt_box.setMaximumSize(QtCore.QSize(50, 20))\n self.high_txt_box.setObjectName(_fromUtf8(\"high_txt_box\"))\n self.horizontalLayout_5.addWidget(self.high_txt_box)\n self.label_3 = QtGui.QLabel(self.groupBox)\n self.label_3.setObjectName(_fromUtf8(\"label_3\"))\n self.horizontalLayout_5.addWidget(self.label_3)\n self.gridLayout.addLayout(self.horizontalLayout_5, 1, 2, 1, 1)\n self.verticalLayout.addWidget(self.groupBox)\n self.line = QtGui.QFrame(DiagnosticDataForm)\n self.line.setFrameShape(QtGui.QFrame.HLine)\n self.line.setFrameShadow(QtGui.QFrame.Sunken)\n self.line.setObjectName(_fromUtf8(\"line\"))\n self.verticalLayout.addWidget(self.line)\n self.horizontalLayout_4 = QtGui.QHBoxLayout()\n self.horizontalLayout_4.setObjectName(_fromUtf8(\"horizontalLayout_4\"))\n self.inconsistencyLabel = QtGui.QLabel(DiagnosticDataForm)\n font = QtGui.QFont()\n font.setBold(True)\n font.setWeight(75)\n self.inconsistencyLabel.setFont(font)\n self.inconsistencyLabel.setObjectName(_fromUtf8(\"inconsistencyLabel\"))\n self.horizontalLayout_4.addWidget(self.inconsistencyLabel)\n spacerItem6 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.horizontalLayout_4.addItem(spacerItem6)\n self.buttonBox = QtGui.QDialogButtonBox(DiagnosticDataForm)\n self.buttonBox.setOrientation(QtCore.Qt.Horizontal)\n self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)\n self.buttonBox.setObjectName(_fromUtf8(\"buttonBox\"))\n self.horizontalLayout_4.addWidget(self.buttonBox)\n self.verticalLayout.addLayout(self.horizontalLayout_4)\n self.prevalence_lbl.setBuddy(self.prevalence_txt_box)\n\n self.retranslateUi(DiagnosticDataForm)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\"accepted()\")), DiagnosticDataForm.accept)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\"rejected()\")), DiagnosticDataForm.reject)\n QtCore.QMetaObject.connectSlotsByName(DiagnosticDataForm)\n DiagnosticDataForm.setTabOrder(self.two_by_two_table, self.clear_Btn)\n DiagnosticDataForm.setTabOrder(self.clear_Btn, self.back_calc_Btn)\n DiagnosticDataForm.setTabOrder(self.back_calc_Btn, self.prevalence_txt_box)\n DiagnosticDataForm.setTabOrder(self.prevalence_txt_box, self.effect_cbo_box)\n DiagnosticDataForm.setTabOrder(self.effect_cbo_box, 
self.buttonBox)\n\n def retranslateUi(self, DiagnosticDataForm):\n DiagnosticDataForm.setWindowTitle(_translate(\"DiagnosticDataForm\", \"Dialog\", None))\n item = self.two_by_two_table.verticalHeaderItem(0)\n item.setText(_translate(\"DiagnosticDataForm\", \"(test) +\", None))\n item = self.two_by_two_table.verticalHeaderItem(1)\n item.setText(_translate(\"DiagnosticDataForm\", \"(test) -\", None))\n item = self.two_by_two_table.verticalHeaderItem(2)\n item.setText(_translate(\"DiagnosticDataForm\", \"total\", None))\n item = self.two_by_two_table.horizontalHeaderItem(0)\n item.setText(_translate(\"DiagnosticDataForm\", \"(disease) +\", None))\n item = self.two_by_two_table.horizontalHeaderItem(1)\n item.setText(_translate(\"DiagnosticDataForm\", \"(disease) -\", None))\n item = self.two_by_two_table.horizontalHeaderItem(2)\n item.setText(_translate(\"DiagnosticDataForm\", \"total\", None))\n self.clear_Btn.setText(_translate(\"DiagnosticDataForm\", \"Clear Form\", None))\n self.back_calc_Btn.setText(_translate(\"DiagnosticDataForm\", \"back-calculate table\", None))\n self.prevalence_lbl.setText(_translate(\"DiagnosticDataForm\", \"Prevalence\", None))\n self.label_13.setText(_translate(\"DiagnosticDataForm\", \"metric\", None))\n self.ci_label.setToolTip(_translate(\"DiagnosticDataForm\", \"Use the box to the left to set the % confidence interval\", None))\n self.ci_label.setText(_translate(\"DiagnosticDataForm\", \"X% Confidence Interval\", None))\n self.label_14.setText(_translate(\"DiagnosticDataForm\", \"est.\", None))\n self.label.setText(_translate(\"DiagnosticDataForm\", \"[\", None))\n self.label_4.setText(_translate(\"DiagnosticDataForm\", \",\", None))\n self.label_3.setText(_translate(\"DiagnosticDataForm\", \"]\", None))\n self.inconsistencyLabel.setText(_translate(\"DiagnosticDataForm\", \"INCONSISTENT FORM\", None))\n\nimport icons_rc\n" }, { "alpha_fraction": 0.6365264058113098, "alphanum_fraction": 0.6702476739883423, "avg_line_length": 57.78947448730469, "blob_id": "56fa9116389bd56882acf7ade1f41251cf613b15", "content_id": "80514b733b6e21b9f6249e251cf3c5d6be4553be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10053, "license_type": "no_license", "max_line_length": 115, "num_lines": 171, "path": "/src/forms/ui_edit_forest_plot.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'edit_forest_plot.ui'\n#\n# Created: Wed Apr 17 14:37:20 2013\n# by: PyQt4 UI code generator 4.10\n#\n# WARNING! 
All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_edit_forest_plot_dlg(object):\n def setupUi(self, edit_forest_plot_dlg):\n edit_forest_plot_dlg.setObjectName(_fromUtf8(\"edit_forest_plot_dlg\"))\n edit_forest_plot_dlg.resize(506, 434)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(_fromUtf8(\":/images/meta.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n edit_forest_plot_dlg.setWindowIcon(icon)\n self.verticalLayout = QtGui.QVBoxLayout(edit_forest_plot_dlg)\n self.verticalLayout.setObjectName(_fromUtf8(\"verticalLayout\"))\n self.groupBox = QtGui.QGroupBox(edit_forest_plot_dlg)\n self.groupBox.setObjectName(_fromUtf8(\"groupBox\"))\n self.horizontalLayout_2 = QtGui.QHBoxLayout(self.groupBox)\n self.horizontalLayout_2.setObjectName(_fromUtf8(\"horizontalLayout_2\"))\n self.gridLayout_2 = QtGui.QGridLayout()\n self.gridLayout_2.setObjectName(_fromUtf8(\"gridLayout_2\"))\n self.label_2 = QtGui.QLabel(self.groupBox)\n self.label_2.setObjectName(_fromUtf8(\"label_2\"))\n self.gridLayout_2.addWidget(self.label_2, 0, 0, 1, 1)\n self.col1_str_edit = QtGui.QLineEdit(self.groupBox)\n self.col1_str_edit.setObjectName(_fromUtf8(\"col1_str_edit\"))\n self.gridLayout_2.addWidget(self.col1_str_edit, 0, 1, 1, 1)\n self.show_1 = QtGui.QCheckBox(self.groupBox)\n self.show_1.setChecked(True)\n self.show_1.setObjectName(_fromUtf8(\"show_1\"))\n self.gridLayout_2.addWidget(self.show_1, 0, 2, 1, 1)\n self.label_4 = QtGui.QLabel(self.groupBox)\n self.label_4.setObjectName(_fromUtf8(\"label_4\"))\n self.gridLayout_2.addWidget(self.label_4, 1, 0, 1, 1)\n self.col2_str_edit = QtGui.QLineEdit(self.groupBox)\n self.col2_str_edit.setObjectName(_fromUtf8(\"col2_str_edit\"))\n self.gridLayout_2.addWidget(self.col2_str_edit, 1, 1, 1, 1)\n self.show_2 = QtGui.QCheckBox(self.groupBox)\n self.show_2.setChecked(True)\n self.show_2.setObjectName(_fromUtf8(\"show_2\"))\n self.gridLayout_2.addWidget(self.show_2, 1, 2, 1, 1)\n self.label_5 = QtGui.QLabel(self.groupBox)\n self.label_5.setObjectName(_fromUtf8(\"label_5\"))\n self.gridLayout_2.addWidget(self.label_5, 2, 0, 1, 1)\n self.col3_str_edit = QtGui.QLineEdit(self.groupBox)\n self.col3_str_edit.setObjectName(_fromUtf8(\"col3_str_edit\"))\n self.gridLayout_2.addWidget(self.col3_str_edit, 2, 1, 1, 1)\n self.show_3 = QtGui.QCheckBox(self.groupBox)\n self.show_3.setChecked(True)\n self.show_3.setObjectName(_fromUtf8(\"show_3\"))\n self.gridLayout_2.addWidget(self.show_3, 2, 2, 1, 1)\n self.label_6 = QtGui.QLabel(self.groupBox)\n self.label_6.setObjectName(_fromUtf8(\"label_6\"))\n self.gridLayout_2.addWidget(self.label_6, 3, 0, 1, 1)\n self.col4_str_edit = QtGui.QLineEdit(self.groupBox)\n self.col4_str_edit.setObjectName(_fromUtf8(\"col4_str_edit\"))\n self.gridLayout_2.addWidget(self.col4_str_edit, 3, 1, 1, 1)\n self.show_4 = QtGui.QCheckBox(self.groupBox)\n self.show_4.setChecked(True)\n self.show_4.setObjectName(_fromUtf8(\"show_4\"))\n self.gridLayout_2.addWidget(self.show_4, 3, 2, 1, 1)\n self.horizontalLayout_2.addLayout(self.gridLayout_2)\n self.verticalLayout.addWidget(self.groupBox)\n self.gridLayout_4 = QtGui.QGridLayout()\n 
self.gridLayout_4.setObjectName(_fromUtf8(\"gridLayout_4\"))\n self.label_12 = QtGui.QLabel(edit_forest_plot_dlg)\n self.label_12.setMaximumSize(QtCore.QSize(16777215, 50))\n self.label_12.setObjectName(_fromUtf8(\"label_12\"))\n self.gridLayout_4.addWidget(self.label_12, 0, 0, 1, 1)\n self.label_13 = QtGui.QLabel(edit_forest_plot_dlg)\n self.label_13.setObjectName(_fromUtf8(\"label_13\"))\n self.gridLayout_4.addWidget(self.label_13, 4, 0, 1, 1)\n self.x_ticks_le = QtGui.QLineEdit(edit_forest_plot_dlg)\n self.x_ticks_le.setObjectName(_fromUtf8(\"x_ticks_le\"))\n self.gridLayout_4.addWidget(self.x_ticks_le, 4, 2, 1, 1)\n self.label_14 = QtGui.QLabel(edit_forest_plot_dlg)\n self.label_14.setObjectName(_fromUtf8(\"label_14\"))\n self.gridLayout_4.addWidget(self.label_14, 1, 0, 1, 1)\n self.plot_lb_le = QtGui.QLineEdit(edit_forest_plot_dlg)\n self.plot_lb_le.setObjectName(_fromUtf8(\"plot_lb_le\"))\n self.gridLayout_4.addWidget(self.plot_lb_le, 1, 2, 1, 1)\n self.label_15 = QtGui.QLabel(edit_forest_plot_dlg)\n self.label_15.setObjectName(_fromUtf8(\"label_15\"))\n self.gridLayout_4.addWidget(self.label_15, 2, 0, 1, 1)\n self.plot_ub_le = QtGui.QLineEdit(edit_forest_plot_dlg)\n self.plot_ub_le.setObjectName(_fromUtf8(\"plot_ub_le\"))\n self.gridLayout_4.addWidget(self.plot_ub_le, 2, 2, 1, 1)\n self.label_16 = QtGui.QLabel(edit_forest_plot_dlg)\n self.label_16.setObjectName(_fromUtf8(\"label_16\"))\n self.gridLayout_4.addWidget(self.label_16, 5, 0, 1, 1)\n self.show_summary_line = QtGui.QCheckBox(edit_forest_plot_dlg)\n self.show_summary_line.setText(_fromUtf8(\"\"))\n self.show_summary_line.setChecked(True)\n self.show_summary_line.setObjectName(_fromUtf8(\"show_summary_line\"))\n self.gridLayout_4.addWidget(self.show_summary_line, 5, 2, 1, 1)\n self.x_lbl_le = QtGui.QLineEdit(edit_forest_plot_dlg)\n self.x_lbl_le.setObjectName(_fromUtf8(\"x_lbl_le\"))\n self.gridLayout_4.addWidget(self.x_lbl_le, 0, 2, 1, 1)\n self.verticalLayout.addLayout(self.gridLayout_4)\n self.horizontalLayout_4 = QtGui.QHBoxLayout()\n self.horizontalLayout_4.setObjectName(_fromUtf8(\"horizontalLayout_4\"))\n self.label_3 = QtGui.QLabel(edit_forest_plot_dlg)\n self.label_3.setObjectName(_fromUtf8(\"label_3\"))\n self.horizontalLayout_4.addWidget(self.label_3)\n self.image_path = QtGui.QLineEdit(edit_forest_plot_dlg)\n self.image_path.setObjectName(_fromUtf8(\"image_path\"))\n self.horizontalLayout_4.addWidget(self.image_path)\n self.save_btn = QtGui.QPushButton(edit_forest_plot_dlg)\n self.save_btn.setMaximumSize(QtCore.QSize(25, 16777215))\n self.save_btn.setObjectName(_fromUtf8(\"save_btn\"))\n self.horizontalLayout_4.addWidget(self.save_btn)\n self.verticalLayout.addLayout(self.horizontalLayout_4)\n self.buttonBox = QtGui.QDialogButtonBox(edit_forest_plot_dlg)\n self.buttonBox.setOrientation(QtCore.Qt.Horizontal)\n self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Apply)\n self.buttonBox.setObjectName(_fromUtf8(\"buttonBox\"))\n self.verticalLayout.addWidget(self.buttonBox)\n\n self.retranslateUi(edit_forest_plot_dlg)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\"accepted()\")), edit_forest_plot_dlg.accept)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\"rejected()\")), edit_forest_plot_dlg.reject)\n QtCore.QMetaObject.connectSlotsByName(edit_forest_plot_dlg)\n\n def retranslateUi(self, edit_forest_plot_dlg):\n edit_forest_plot_dlg.setWindowTitle(_translate(\"edit_forest_plot_dlg\", \"edit forest plot\", None))\n self.groupBox.setTitle(_translate(\"edit_forest_plot_dlg\", 
\"column labels\", None))\n self.label_2.setText(_translate(\"edit_forest_plot_dlg\", \"col 1 label:\", None))\n self.col1_str_edit.setText(_translate(\"edit_forest_plot_dlg\", \"Studies\", None))\n self.show_1.setText(_translate(\"edit_forest_plot_dlg\", \"show\", None))\n self.label_4.setText(_translate(\"edit_forest_plot_dlg\", \"col 2 label:\", None))\n self.col2_str_edit.setText(_translate(\"edit_forest_plot_dlg\", \"Estimate (Conf. Int.)\", None))\n self.show_2.setText(_translate(\"edit_forest_plot_dlg\", \"show\", None))\n self.label_5.setText(_translate(\"edit_forest_plot_dlg\", \"col 3 label:\", None))\n self.col3_str_edit.setText(_translate(\"edit_forest_plot_dlg\", \"Ev/Trt\", None))\n self.show_3.setText(_translate(\"edit_forest_plot_dlg\", \"show\", None))\n self.label_6.setText(_translate(\"edit_forest_plot_dlg\", \"col 4 label:\", None))\n self.col4_str_edit.setText(_translate(\"edit_forest_plot_dlg\", \"Ev/Ctrl\", None))\n self.show_4.setText(_translate(\"edit_forest_plot_dlg\", \"show\", None))\n self.label_12.setText(_translate(\"edit_forest_plot_dlg\", \"x label:\", None))\n self.label_13.setText(_translate(\"edit_forest_plot_dlg\", \"x ticks:\", None))\n self.x_ticks_le.setText(_translate(\"edit_forest_plot_dlg\", \"[default]\", None))\n self.label_14.setText(_translate(\"edit_forest_plot_dlg\", \"x-axis lower bound\", None))\n self.plot_lb_le.setText(_translate(\"edit_forest_plot_dlg\", \"[default]\", None))\n self.label_15.setText(_translate(\"edit_forest_plot_dlg\", \"x-axis upper bound\", None))\n self.plot_ub_le.setText(_translate(\"edit_forest_plot_dlg\", \"[default]\", None))\n self.label_16.setText(_translate(\"edit_forest_plot_dlg\", \"show summary line:\", None))\n self.x_lbl_le.setText(_translate(\"edit_forest_plot_dlg\", \"[default]\", None))\n self.label_3.setText(_translate(\"edit_forest_plot_dlg\", \"save image to:\", None))\n self.image_path.setText(_translate(\"edit_forest_plot_dlg\", \"./r_tmp/forest.png\", None))\n self.save_btn.setText(_translate(\"edit_forest_plot_dlg\", \"...\", None))\n\nimport icons_rc\n" }, { "alpha_fraction": 0.6670016646385193, "alphanum_fraction": 0.6978224515914917, "avg_line_length": 44.907691955566406, "blob_id": "787ab8aa4e78586a4b143f26630911f9d9a00cc0", "content_id": "f8d2e75f503af63db13d6e50276d14fc58b57476", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2985, "license_type": "no_license", "max_line_length": 94, "num_lines": 65, "path": "/src/forms/ui_network_view.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'network_view_window.ui'\n#\n# Created: Wed Apr 17 14:37:20 2013\n# by: PyQt4 UI code generator 4.10\n#\n# WARNING! 
All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_network_view_dialog(object):\n def setupUi(self, network_view_dialog):\n network_view_dialog.setObjectName(_fromUtf8(\"network_view_dialog\"))\n network_view_dialog.resize(625, 555)\n network_view_dialog.setMaximumSize(QtCore.QSize(625, 555))\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Verdana\"))\n network_view_dialog.setFont(font)\n self.frame = QtGui.QFrame(network_view_dialog)\n self.frame.setGeometry(QtCore.QRect(0, 509, 621, 40))\n self.frame.setFrameShape(QtGui.QFrame.StyledPanel)\n self.frame.setFrameShadow(QtGui.QFrame.Raised)\n self.frame.setObjectName(_fromUtf8(\"frame\"))\n self.horizontalLayout = QtGui.QHBoxLayout(self.frame)\n self.horizontalLayout.setObjectName(_fromUtf8(\"horizontalLayout\"))\n self.label_3 = QtGui.QLabel(self.frame)\n self.label_3.setMaximumSize(QtCore.QSize(50, 16777215))\n self.label_3.setObjectName(_fromUtf8(\"label_3\"))\n self.horizontalLayout.addWidget(self.label_3)\n self.outcome_cbo_box = QtGui.QComboBox(self.frame)\n self.outcome_cbo_box.setObjectName(_fromUtf8(\"outcome_cbo_box\"))\n self.horizontalLayout.addWidget(self.outcome_cbo_box)\n self.label_4 = QtGui.QLabel(self.frame)\n self.label_4.setMaximumSize(QtCore.QSize(60, 16777215))\n self.label_4.setObjectName(_fromUtf8(\"label_4\"))\n self.horizontalLayout.addWidget(self.label_4)\n self.follow_up_cbo_box = QtGui.QComboBox(self.frame)\n self.follow_up_cbo_box.setObjectName(_fromUtf8(\"follow_up_cbo_box\"))\n self.horizontalLayout.addWidget(self.follow_up_cbo_box)\n self.network_viewer = QtGui.QGraphicsView(network_view_dialog)\n self.network_viewer.setGeometry(QtCore.QRect(1, 1, 621, 501))\n self.network_viewer.setObjectName(_fromUtf8(\"network_viewer\"))\n\n self.retranslateUi(network_view_dialog)\n QtCore.QMetaObject.connectSlotsByName(network_view_dialog)\n\n def retranslateUi(self, network_view_dialog):\n network_view_dialog.setWindowTitle(_translate(\"network_view_dialog\", \"Network\", None))\n self.label_3.setText(_translate(\"network_view_dialog\", \"outcome:\", None))\n self.label_4.setText(_translate(\"network_view_dialog\", \"follow-up:\", None))\n\n" }, { "alpha_fraction": 0.67719566822052, "alphanum_fraction": 0.70711350440979, "avg_line_length": 52.06944274902344, "blob_id": "562b7ce0320aac5c4b60e6295bbad3a821346a45", "content_id": "80e85d9682ad111a417e9a99cd4d852342b373ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 7788, "license_type": "no_license", "max_line_length": 395, "num_lines": 144, "path": "/src/R/HSROC/man/simdata.Rd", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "\\name{simdata}\r\n\\alias{simdata}\r\n\r\n\\title{Simulate a dataset based on a HSROC model}\r\n\\description{\r\n This function simulates a dataset based on the HSROC diagnostic meta-analysis model. It allows for the reference standard to be imperfect or perfect. 
\r\n}\r\n\\usage{\r\nsimdata(N, n, n.random = \"FALSE\", sub_rs = NULL, prev, se_ref = NULL, \r\n sp_ref = NULL, T, range.T = c(-Inf, Inf), L, range.L = c(-Inf, Inf), \r\n sd_t, sd_a, b, path = getwd() )\r\n}\r\n\r\n\r\n\\arguments{\r\n \\item{N}{the number of studies to be included in the meta-analysis.} \r\n \\item{n}{numerical vector, possibly a single value, specifying the number of individuals within each study. See details for further explanations. } \r\n \\item{n.random}{if TRUE, the number of individuals within each study is drawn from n with replacement.} \r\n \\item{sub_rs}{a list that specifies the reference standard used by each study. See details for further explanations.}\r\n \\item{prev}{a vector of length \\code{N} giving the prevalence in each study.}\t\r\n \\item{se_ref}{a vector of length equal to the number of reference standards giving the sensitivity for each reference test.}\t\r\n \\item{sp_ref}{a vector of length equal to the number of reference standards giving the specificity for each reference test.}\t\r\n \\item{T}{single numeric value, the overall mean cut-off value to define a positive test.}\r\n \\item{range.T}{a vector of length 2 specifiying a range of values for the individual cut-off \\eqn{\\theta_i}{theta_i}. See details for further explanations. }\t\r\n \\item{L}{single numeric value, the overall difference in mean values (diagnostic accuracy) on the continuous index test result comparing the diseased group and the non-diseased group.}\t\r\n \\item{range.L}{a vector of length 2 specifiying a range of values for the individual difference in mean values (diagnostic accuracy) on the continuous index test result comparing the diseased group and the non-diseased group \\eqn{\\alpha_i}{alpha_i}. See details for further explanations. }\r\n \\item{sd_t}{single numeric value, the between study standard deviation in the cut-off \\eqn{\\theta_i}{theta_i}. }\t\r\n \\item{sd_a}{single numeric value, the between study standard deviation in the mean value of the index test disease group \\eqn{\\alpha_i}{alpha_i} }\t\r\n \\item{b}{single numeric value, the ratio of the continuous standard deviation of the index test results on patients with the disease compared to patients without the disease. }\t\r\n \\item{path}{a character string pointing to the directory where the simulated data will be saved to. }\r\n\r\n}\r\n\r\n\r\n\\value{\r\nA list of the 2x2 tables for each study, the between-study parameters, the within-study parameters and the reference standard.\r\n\r\nText files are created in the \\code{path} directory. These files are :\r\n\r\n\\dQuote{True_values.txt}, reports the within-study parameters \\eqn{\\alpha_i}{alpha_i}, \\eqn{\\theta_i}{theta_i}, sensitivity of test under evaluation ( \\eqn{S_{1i}}{S1_i} ), specificity of test under evaluation ( \\eqn{C_{1i}}{C1_i} ) and prevalence (\\eqn{\\pi_i}{pi_i}) used in the simulation.\r\n\r\n\\dQuote{True_values2.txt}, reports the values of the between-study parameters \\eqn{\\Lambda}{LAMBDA}, standard deviation of \\eqn{\\alpha_i}{alpha_i} ( \\eqn{\\sigma_{\\alpha}}{sigma_alpha} ), \\eqn{\\Theta}{THETA}, standard deviation of \\eqn{\\theta_i}{theta_i} ( \\eqn{\\sigma_{\\theta}}{sigma_theta} ) and \\eqn{\\beta}{beta} used to simulate the data. \r\n\r\n\\dQuote{True_REFSTD.txt}, reports the values of the reference standard used to simulate the data.\r\n\r\n\\dQuote{True_values_index.txt}, reports the variable names of the 3 files described above.\r\n\r\n\t\t. 
\r\n}\r\n\r\n\r\n\\details{\r\n\r\nThe HSROC model uses the following parametrization : \\eqn{S_i = \\Phi(\\frac{-\\theta_i + \\alpha_i/2}{exp(\\beta/2)})}{S_i = Phi(-(theta_i - alpha_i/2)/exp(beta/2))} and \\eqn{C_i = \\Phi(\\frac{\\theta_i + \\alpha_i/2}{exp(-\\beta/2)})}{C_i = Phi((theta_i + alpha_i/2)/exp(-beta/2))}\r\n\r\nIf \\code{n.random} is \\code{FALSE}, the number of components in \\code{n} must match the value of \\code{N}, unless \\code{n} is equal to a single value. For the latter case, all studies would be assumed to have the same number of individuals, that is \\code{n}.\r\nIf \\code{n.random} is \\code{TRUE}, the number of elements may not necessarly be equal to the value of \\code{N}.\r\n\r\nThe first element of the list-object \\code{sub_rs} corresponds to the number of different reference standards. The default value is 1. The number of additional elements will depend on the value of the first element. There must be as many additional elements in \\code{sub_rs} as there are different reference standards. Assuming the studies are labelled 1, ..., N, \r\neach of these additional elements must be a vector (possibly of length one) taking as their values the labels of the studies sharing the same reference standard. For example, if we have 2 reference tests, the first one applied over studies 1-10 and the second one applied over studies 11-15 then the \\code{sub_rs} list-argument should be of length 3 with the following elements : 2, 1:10, 11:15\r\n \r\nThe \\code{range.T} argument ensures the individual \\eqn{\\theta_i}{theta_i} will be generated within the range provided. If no range is provided by the user (default) the function assumes no restrictions are made on the possible values of \\eqn{\\theta_i}{theta_i}.\r\nThe \\code{range.L} argument ensures the individual \\eqn{\\alpha_i}{alpha_i} will be generated within the range provided. 
If no range is provided by the user (default) the function assumes no restrictions are made on the possible values of \\eqn{\\theta_i}{theta_i}.\r\n\r\n}\r\n\r\n\r\n\\examples{\r\n\r\n#EXAMPLE 1\r\n#We want to simulate data for 10 studies based on an HSROC model assuming \r\n#each study uses the same imperfect reference standard.\r\n \r\nN = 10\r\nLAMBDA = 2\r\nsd_alpha = 0.75\r\nTHETA = 1.5\r\nsd_theta = 0.5\r\nbeta = 0\r\npi = runif(10,0,1)\r\n\r\nREFSTD = list(1, 1:10) #Only 1 reference standard ...\r\ns2 = c(0.5)\t #Sensitivity of the reference test\r\nc2 = c(0.85) \t #Specificity of the reference test\t\r\n\r\n\r\nsim.data = simdata(N=N, n = c(50,50,60,60,70,70,80,80,90,90), \r\n sub_rs = REFSTD, prev=pi, se_ref=s2, sp_ref=c2, T=THETA, \r\n L=LAMBDA, sd_t=sd_theta, sd_a=sd_alpha, b=beta)\r\n\r\n\r\n\r\n#EXAMPLE 2\r\n#We want to simulate data for 15 studies based on an HSROC model such that \r\n#the first 5 studies share a common reference standard and the remaining \r\n#10 studies also share a common reference standard.\r\n \r\nN = 15\r\nLAMBDA = 3.6\r\nsd_alpha = 1.15\r\nTHETA = 2.3\r\nsd_theta = 0.75\r\nbeta = 0.15\r\npi = runif(15,0.1,0.5)\r\n\r\nREFSTD = list(2, 1:5, 6:15) #Two different reference standards ...\r\ns2 = c(0.40, 0.6)\t #Sensitivity of the reference tests\r\nc2 = c(0.75,0.95) \t #Specificity of the reference tests\t\r\n\r\n#Thus, for the first 5 studies, S2 = 0.40 and C2 = 0.75 while for the last \r\n#10 studies s2 = 0.6 and c2 = 0.95\r\n\r\n\r\nsim.data = simdata(N=N, n=seq(30,120,1), n.random=TRUE, sub_rs = REFSTD, \r\n prev=pi, se_ref=s2, sp_ref=c2, T=THETA, L=LAMBDA, sd_t=sd_theta, \r\n sd_a=sd_alpha, b=beta)\r\n\r\n\r\n#EXAMPLE 3\r\n#Assume the same context as the one in EXAMPLE 2 and let's suppose\r\n#that each individual cut-off theta_i should lie between [-5,5] \r\n \r\nN = 15\r\nLAMBDA = 3.6\r\nsd_alpha = 1.15\r\nTHETA = 2.3\r\nsd_theta = 0.75\r\nbeta = 0.15\r\npi = runif(15,0.1,0.5)\r\n\r\nREFSTD = list(2, 1:5, 6:15) #Two different reference standards ...\r\ns2 = c(0.40, 0.6)\t #Sensitivity of the reference tests\r\nc2 = c(0.75,0.95) \t #Specificity of the reference tests\t\r\n\r\n#Thus, for the first 5 studies, S2 = 0.40 and C2 = 0.75 while for the last \r\n#10 studies s2 = 0.6 and c2 = 0.95\r\n\r\n\r\nsim.data = simdata(N=N, n=seq(30,120,1), n.random=TRUE, sub_rs = REFSTD, \r\n prev=pi, se_ref=s2, sp_ref=c2, T=THETA, range.T=c(-5,5),L=LAMBDA, \r\n sd_t=sd_theta,sd_a=sd_alpha, b=beta)\r\n}\r\n\r\n\\keyword{datagen}\r\n\r\n" }, { "alpha_fraction": 0.4878048896789551, "alphanum_fraction": 0.4878048896789551, "avg_line_length": 11.666666984558105, "blob_id": "c3dc48515aa0addb02c648665523671773ca300b", "content_id": "104b672f3f28ee8dbe678f385463f5da8ceb7c0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 82, "license_type": "no_license", "max_line_length": 23, "num_lines": 6, "path": "/src/R/HSROC/R/pi.alpha.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "pi.alpha <-\r\nfunction (a, b) \r\n{\r\n result = sum(a) + b\r\n return(result)\r\n}\r\n" }, { "alpha_fraction": 0.5735114216804504, "alphanum_fraction": 0.5835449695587158, "avg_line_length": 35.829097747802734, "blob_id": "7274662033808f2f3474e3fcbfa13e4dc0364385", "content_id": "d023511a5229fdb404034d9a49c3e4aefe9c2706", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31893, "license_type": "no_license", "max_line_length": 209, "num_lines": 866, "path": 
"/src/bugs_gen2.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "'''\nMetaAnalyst\nTufts Medical Center 2008\nbugs_generator.py aka pyBugs\nIronPython (ipy)\n\n*** DO NOT REMOVE OR ALTER THIS FILE ***\n\nThis module contains static methods for talking to BRUGs via buffer files. It is a convienent \nglue between MetaAnalyst (.NET) and the BRUGS.dll, and includes methods\nboth for generating data input files for BRUGS and for collecting the results back up. \n\nIt is called by MetaAnalyst prior to Bayesian analyses to generate the files necessary to talk to BRUGS,\nas well as afterwards to collect and package the results distributed in their respective buffer text files.\n'''\n\nimport System\nimport System.Collections.ArrayList as arraylist\nimport clr \nimport math\n\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \n#\t\t\t\tBRUGS model strings. \t\t\t\t\t\t\t\t\t\t\t\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \n\n\n#########################################################\n#\t\t\t\t Continuous string(s)\t\t\t\t\t\t #\n#########################################################\n\n# (linear) control rate, with no covariates\ncont_bayes_two_group_linear_cr_str = \"\"\"\nmodel {\n for( i in 1 : N ) { \n # N is number of studies\n # the precisions for the mean responses \n # which are stochastic nodes!\n \n prec.t[i] <- 1/((sd.t[i]*sd.t[i])/n.t[i])\t\t\n prec.c[i] <- 1/((sd.c[i]*sd.c[i])/n.c[i])\n \n y.t[i] ~dnorm(theta.t[i], prec.t[i])\n y.c[i] ~dnorm(theta.c[i], prec.c[i])\n \n theta.t[i] <- delta.star[i] + theta.c[i]\n theta.c[i] ~ dnorm(muc, inv.tauc2) \t\t\t\t\t\t\t\t\t\t\t\t\t\n \n delta.star[i] ~ dnorm(mu[i], inv.tau2 )\n \n mu[i] <- delta + beta0 *(theta.c[i] - muc)\t\t\n }\n muc ~ dnorm(0.0,precnorm)\n \n delta ~ dnorm(0.0,precnorm)\n beta0 ~ dnorm(0.0,precnorm)\n \n inv.tau2 ~ dgamma(gamma.a,gamma.b)\n tau2 <- 1/inv.tau2\n tau <- sqrt(tau2)\t\t\t\n\n inv.tauc2 ~ dgamma(gamma.c.a, gamma.c.b)\n tauc2 <- 1/inv.tauc2\n tauc <- sqrt(tauc2)\t\t\t\n\n \n\"\"\"\n\n\ncont_bayes_str_one_group = \"\"\"\nmodel {\n for( i in 1 : N ) { # N is number of studies\n # the precisions for the mean responses \n # which are stochastic nodes!\n y.t[i] ~ dnorm(theta[i], prec.y[i])\n prec.y[i] <- 1/var.y[i]\n var.y[i] <- (sd.t[i]*sd.t[i])/n.t[i] \n \n theta[i] ~ dnorm(mu[i], inv.tau2)\n mu[i] <- Ysum \n \n }\n Ysum ~ dnorm(0.0, precnorm)\n inv.tau2 ~ dgamma(gamma.a, gamma.b)\n tau2 <- 1/inv.tau2\n tau <- sqrt(tau2)\t\t\n \n\n\"\"\"\n\ncont_bayes_str_generic = \"\"\"\nmodel {\n for( i in 1 : N ) { # N is number of studies\n # the precisions for the mean responses \n # which are stochastic nodes!\n y.t[i] ~ dnorm(theta[i], prec.y[i])\n prec.y[i] <- 1/var.y[i]\n \n theta[i] ~ dnorm(mu[i], inv.tau2)\n mu[i] <- Ysum \n }\n Ysum ~ dnorm(0.0, precnorm)\n inv.tau2 <- 1/tau2\n tau2 <- tau*tau\n tau ~ dunif(0, 100)\t\t\n\"\"\"\n\n\n\n\n########################################################\n#\t\t\t\t\tBinary string(s)\t\t\t\t\t\t #\n########################################################\n\n# this is the meta-analysis of logOR \n# without covariates\nbinary_bayes_or_model_str = \"\"\"\nmodel {\n for (i in 1:N) {\n rc[i] ~ dbin(pc[i],nc[i])\n rt[i] ~ dbin(pt[i],nt[i])\n logit(pc[i]) <- mu[i]\n logit(pt[i]) <- mu[i] + d[i]\n\n d[i] ~ dnorm(delta,invtau2)\n mu[i] ~ dnorm(mu.bar, invtauc2)\n\n d.exp[i] <- exp(d[i])\n }\n\n delta ~ dnorm(0.0, precnorm)\n delta.exp <- exp(delta)\t\n \n beta ~ dnorm(0.0, precnorm)\n beta1 ~ dnorm(0.0, precnorm)\n \n mu.bar ~ dnorm(0.0, precnorm)\n 
invtau2 ~ dgamma(gamma.a, gamma.b)\n invtauc2 ~ dgamma(gamma.c.a, gamma.c.b)\n tau2 <- 1/invtau2\t\n tauc2 <- 1/invtauc2\t\n cr.bar <- exp(mu.bar) / (exp(mu.bar) + 1)\n \n\"\"\"\n\nbinary_bayes_one_group_str = \"\"\"\nmodel{\n for (i in 1:N) {\n r[i] ~ dbin(p[i],n[i])\n logit(p[i]) <- mu[i]\n \n mu[i] ~ dnorm(mu.bar, invtau2)\n \n }\n\n mu.bar ~ dnorm(0.0, precnorm)\n invtau2 ~ dgamma(gamma.a, gamma.b)\n tau2 <- 1/invtau2\t\n psum <- exp(mu.bar) / (exp(mu.bar) + 1)\n \n\"\"\"\n\n''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n#\t\t\t\t\t\t\t\tEnd BRUGS model strings. \t\t\t\t\t #\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n\n\ndef generate_model_file_binary(out_path, metric=\"or\", cr=\"none\", num_covs=0, is_two_group= False):\n if not is_two_group:\n out_str = binary_bayes_one_group_str \n if num_covs:\n cov_str = build_reg_str_for_covs(num_covs) \n out_str = out_str.replace(\"mu[i] ~ dnorm(mu.bar, invtau2)\", \"mu[i] ~ dnorm(mu.bar.star[i], invtau2)\\n\\t\\tmu.bar.star[i]<-mu.bar + \" + cov_str)\n out_str += build_assignments_for_covs(num_covs)\n out_str += \"\\n}\"\n f = file(out_path, 'w')\n f.write(out_str)\n f.close()\n return \n \n out_str = binary_bayes_or_model_str\n #\n # here we modify the model for risk ratio\n #\n if metric == \"rr\":\n out_str = out_str.replace(\"logit(pt[i]) <- mu[i] + d[i]\", \"log(pt[i]) <- mu[i] - log(1 + exp(mu[i])) + d[i]\");\n out_str = out_str.replace(\"cr.bar <- exp(mu.bar) / (exp(mu.bar) + 1)\", \"cr.bar <- exp(mu.bar)\")\n \n #\n # build string for covariate regression\n #\n cov_str = \"\"\n if num_covs:\n cov_str = \" + \" + build_reg_str_for_covs(num_covs)\n \n if cr == \"none\":\n out_str = out_str.replace(\"beta ~ dnorm(0.0, precnorm)\", \"\")\n out_str = out_str.replace(\"beta1 ~ dnorm(0.0, precnorm)\", \"\")\n if num_covs:\n out_str = out_str.replace(\"d[i] ~ dnorm(delta,invtau2)\", \"d[i] <- d.star[i] \" + cov_str + \"\\n\\t\\td.star[i] ~ dnorm(delta,invtau2)\")\n \n elif cr == \"linear\":\n out_str = out_str.replace(\"beta1 ~ dnorm(0.0, precnorm)\", \"\")\n out_str = out_str.replace(\"d[i] ~ dnorm(delta,invtau2)\", \"d[i] <- d.star[i] + beta*(mu[i] - mu.bar)\" + cov_str + \"\\n\\t\\td.star[i] ~ dnorm(delta,invtau2)\")\n else:\n # quadratic\n out_str = out_str.replace(\"d[i] ~ dnorm(delta,invtau2)\", \"d[i] <- d.star[i] + beta*(mu[i] - mu.bar) + beta1*((mu[i] - mu.bar) * (mu[i] - mu.bar)) \" + cov_str + \"\\n\\t\\td.star[i] ~ dnorm(delta,invtau2)\")\n \n if num_covs:\n out_str += build_assignments_for_covs(num_covs)\n \n out_str += \"\\n}\"\n\n f = file(out_path, 'w')\n f.write(out_str)\n f.close()\n \ndef build_reg_str_for_covs(num_covs):\n return \"+\".join([\"cov%s * (x%s[i] - mean(x%s[]))\" % (i, i, i) for i in range(num_covs)])\n \ndef build_assignments_for_covs(num_covs):\n return \"\\n\".join([\"\\tcov%s ~ dnorm(0.0, precnorm)\" % i for i in range(num_covs)])\t\n\ndef generate_data_file_binary_one_group(out_path, num_studies, r, n, prior_on_q, prior_on_lam, covariates, precnorm=.000001):\n '''\n r -- numerator (events)\n n -- denominator (sample size)\n '''\n precnorm = \"%15.10f\" % precnorm\n bin_data = []\n f = file(out_path, 
'w')\n r = build_brugs_list_from_vector(\"r\", r)\n n = build_brugs_list_from_vector(\"n\", n)\n f.write(\"list(\")\n f.write(\", \".join([r, n]))\n\n all_covs = []\n for cov_index in range(covariates.GetLength(1) - 1):\n all_covs.append(build_brugs_list_from_vector(\"x%s\"%cov_index, get_col(covariates, cov_index+1)))\n\n if (covariates.GetLength(1) - 1):\n f.write(\", \")\n f.write(\", \".join(all_covs))\n\n f.write(\", precnorm=%s, gamma.a=%s, gamma.b=%s, N=%s)\" % \n (precnorm, prior_on_q, prior_on_lam, num_studies))\n f.close()\n\n\ndef generate_all_data_files_diagnostic(out_dir, num_studies, tps, fps, fns, tns, q_priors, lam_priors, xt):\n num_diag_outcomes = 8\n metric_functions_one_grp = [\"specificity(tn, fp)\", \"sensitivity(tp, fn)\", \"ppv(tp, fp)\", \"npv(tn, fn)\", \"accuracy(tp, fp, fn, tn)\"]\n \n for metric_index in range(len(metric_functions_one_grp)):\n metric_f = metric_functions_one_grp[metric_index]\n r, n = [], []\n for i in range(num_studies):\n tp, fp, fn, tn = tps[i], fps[i], fns[i], tns[i]\n r_i, n_i = eval(metric_f)\n r.append(r_i) # outcome metric for study\n n.append(n_i) # study size\n generate_data_file_binary_one_group(out_dir + metric_f.split(\"(\")[0] + \"//data.txt\", num_studies, r, n, q_priors[metric_index], lam_priors[metric_index], xt)\n \n metric_functions_two_grp = [\"dor(tp, fp, fn, tn)\", \"lrp(tp, fp, fn, tn)\", \"lrn(tp, fp, fn, tn)\"]\n for metric_index in range(len(metric_functions_two_grp)):\n metric_f = metric_functions_two_grp[metric_index]\n rt, nt, rc, nc = [], [], [], []\n for i in range(num_studies):\n tp, fp, fn, tn = tps[i], fps[i], fns[i], tns[i]\n rt_i, nt_i, rc_i, nc_i = eval(metric_f)\n rt.append(rt_i)\n nt.append(nt_i)\n rc.append(rc_i)\n nc.append(nc_i)\n q_tuple = [q_priors[metric_index] for i in range(2)]\n lam_tuple = [lam_priors[metric_index] for i in range(2)]\n generate_data_file_binary(out_dir + metric_f.split(\"(\")[0] + \"//data.txt\", num_studies, rt, nt, rc, nc, q_tuple , lam_tuple , xt)\n \n \n# \n# These return a tuple; the first entry is the numerator (r) the second, the denominator (n)\n#\ndef sensitivity(tp, fn):\n return (tp , (tp + fn))\n \ndef specificity(tn, fp):\n return (tn , (fp + tn))\n \ndef ppv(tp, fp):\n return (tp , (tp + fp))\n \ndef npv(tn, fn):\n return (tn , (fn + tn))\n \ndef accuracy(tp, fp, fn, tn):\n return ((tp + tn) , (fp + tp + fn + tn))\n \n#\n# Thesee return a fourple; rt, nt, rc, nc\n#\ndef dor(tp, fp, fn, tn):\n #return ((tp * tn) , (fp * fn))\n return (tp, tp + fn, fp, fp + tn)\n \ndef lrp(tp, fp, fn, tn):\n #return (sensitivity(tp, fn), (1-specificity(tn, fp)))\n return (tp, tp + fn, fp, fp + tn)\n \ndef lrn(tp, fp, fn, tn):\n #return ((1 - sensitivity(tp, fn)), specificity(tn, fp))\n return (fn, fn + tp, tn, tn + fp)\n\ndef generate_data_file_binary(out_path, num_studies, rt, Nt, rc, Nc, priors_on_q, priors_on_lam, covariates, precnorm=.000001):\n '''\n rt is numerator in treatment (number of events in treated group), Nt is the total number of subjects in the treated group; rc is numerator in control\n Nc is the total number of subjects in the control group.\n '''\n precnorm = \"%15.10f\" % precnorm\n bin_data = []\n f = file(out_path, 'w')\n f.write(\"list(\")\n rt = build_brugs_list_from_vector(\"rt\", rt)\n nt = build_brugs_list_from_vector(\"nt\", Nt)\n rc = build_brugs_list_from_vector(\"rc\", rc)\n nc = build_brugs_list_from_vector(\"nc\", Nc)\n \n # here we deal with covariates\n all_covs = []\n # the covariates (or, xi) double[,] matrix has a 1s column at index 0. 
we're not interested in this.\n    for cov_index in range(covariates.GetLength(1) - 1):\n        all_covs.append(build_brugs_list_from_vector(\"x%s\"%cov_index, get_col(covariates, cov_index+1)))\n    vecs_for_brugs =[rt, nt, rc, nc]\n    vecs_for_brugs.extend(all_covs)\n    \n    f.write(\", \".join(vecs_for_brugs))\n    f.write(\", precnorm=%s, gamma.a=%s, gamma.b=%s, gamma.c.a=%s, gamma.c.b=%s, N=%s)\" % \n    (precnorm, priors_on_q[0], priors_on_lam[0], priors_on_q[1], priors_on_lam[1], num_studies))\n    f.close()\n    \n\n# \n# Use generate_all_inits_using_values (getting init values via Chris' method) instead!\n#\ndef generate_all_inits_for_binary(outdir, N, num_chains=3):\n    # get bugsy style list for mu and d inits (these need a list of values)\n    list_chains = generate_inits_for_vars_that_need_list(num_chains, [\"mu\", \"d\"], N)\n    \n    # now generate initial values for variables that only need a scalar\n    scalar_chains = generate_inits_for_vars_that_need_scalars(num_chains, [\"delta\", \"mu.bar\", \"invtau2\", \"invtauc2\"], [0.0, 0.0, 0.1, .05])\n    # now combine them\n    for list_inits, scalar_inits in zip(list_chains, scalar_chains):\n        list_inits.append(scalar_inits)\n    write_out_init_chains(outdir, list_chains)\n    \n    \ndef generate_all_inits_for_cont(outdir, N, cr, num_covs, is_two_group, generic=False):\n    num_chains=3\n    scalar_vars, scalar_vals = None, None\n    list_chains = None\n    if is_two_group:\n        list_chains = generate_inits_for_vars_that_need_list(num_chains, [\"delta.star\", \"theta.c\"], N)\n        scalar_vars = [\"delta\", \"muc\", \"inv.tau2\", \"inv.tauc2\"]\n        scalar_vals = [0.0, 0.0, 0.1, 0.05]\n        if cr == \"linear\":\n            scalar_vars.append(\"beta0\")\n            scalar_vals.append(0.0)\n    else:\n        list_chains = generate_inits_for_vars_that_need_list(num_chains, [\"theta\"], N)\n        if not generic:\t\n            scalar_vars = [\"Ysum\", \"inv.tau2\"]\n            scalar_vals = [0.0, 0.1]\n        else:\n            scalar_vars = [\"Ysum\", \"tau\"]\n            scalar_vals = [0.0, 1.0]\n\n    for cov in range(num_covs):\n        scalar_vars.append(\"cov%s\"%cov)\n        scalar_vals.append(0.0)\n    scalar_chains = generate_inits_for_vars_that_need_scalars(num_chains, scalar_vars, scalar_vals)\n    for list_inits, scalar_inits in zip(list_chains, scalar_chains):\n        list_inits.append(scalar_inits)\n    write_out_init_chains(outdir, list_chains)\n\n\ndef generate_all_inits_using_values(phi_mat, xi_mat, outdir, num_chains, num_covs, cr, is_two_group):\n    all_chains = massage_inits_to_bugs_format_binary(num_chains, phi_mat, xi_mat, num_covs, cr, is_two_group)\n    write_out_init_chains(outdir, all_chains)\n\n\ndef write_out_init_chains(outdir, list_of_chains):\n    for chain_index in range(len(list_of_chains)):\n        f = file(outdir + \"//inits%s.txt\" % (chain_index+1), 'w')\n        f.write(\"list(\" + \",\".join(list_of_chains[chain_index]) + \")\")\n        f.close()\n\ndef massage_inits_to_bugs_format_binary(num_chains, phi_mat, xi_mat, num_covs, cr, is_two_group):\n    '''\n    This method takes the output from Chris' two initialization generating routines and outputs them to BUGS\n    friendly files. \n    \n    phi_mat - these are the scalar (I think of them, perhaps incorrectly, as initial values for 'summary' values)\n    phi_mat is indexed by x,y where x is the parameter index and y is the chain index \n    xi_mat - these are arrays, i.e., study level parameters. however this gets passed in as a three-dimensional\n    matrix, indexed by study, parameter, and chain, respectively. that is, xi_mat[x,y,z] gives the initial\n    value for study x, parameter y, for chain z. \n    both of the above parameters are assumed to have three chains.  
this is hard-coded into Chris' code.  from\n    these chains we can build additional chains.\n    '''\n    list_of_chains = []\n    number_of_studies = xi_mat.GetLength(0)\n    xi_parameter_names = [\"mu\"]\n    if is_two_group:\n        xi_parameter_names.append(\"d\") # this will probably need to be changed for other (non-binary) datatypes\n\n    phi_mat_parameter_names = None\n    \n    if is_two_group:\n        phi_mat_parameter_names = [\"mu.bar\", \"delta\"]\n    else:\n        phi_mat_parameter_names = [\"mu.bar\"]\n    \n    for cov_num in range(num_covs):\n        phi_mat_parameter_names.append(\"cov%s\" % cov_num)\n    \n    if cr > 0:\n        # we need beta!\n        phi_mat_parameter_names.append(\"beta\")\n    \n    if cr == 2:\n        phi_mat_parameter_names.append(\"beta1\")\n\n    if is_two_group:\n        phi_mat_parameter_names.extend([\"invtauc2\", \"invtau2\"])\n    else:\n        phi_mat_parameter_names.append(\"invtau2\")\n\n    for chain in range(xi_mat.GetLength(2)):\n        # generate the whole chain at once\n        chain_str = []\n        for param in range(xi_mat.GetLength(1)):\n            # first generate the inits for the values in the xi_mat; i.e., study-level initial values\n            cur_param_vector= [xi_mat[study, param, chain] for study in range(number_of_studies)]\n            chain_str.append(build_brugs_list_from_vector(xi_parameter_names[param], cur_param_vector))\n        phi_vals_for_cur_chain = [phi_mat[param, chain] for param in range(phi_mat.GetLength(0))]\n        chain_str.append(\", \".join([\"%s = %15.10f\" % (parameter, val) for parameter, val in zip(phi_mat_parameter_names, phi_vals_for_cur_chain)]))\n        list_of_chains.append(chain_str)\n    return list_of_chains\n    \n\ndef generate_inits_for_vars_that_need_scalars(num_chains, parameters, init_vals):\n    all_chains = []\n    for chain in range(num_chains):\n        all_chains.append(\", \".join([\"%s = %15.10f\" % (parameter, val) for parameter, val in zip(parameters, init_vals)]))\n    return all_chains\n    \n\n    \ndef generate_inits_for_vars_that_need_list(num_chains, parameters, num_vals_needed):\n    rand = System.Random()\n    first_three_vals = [0.0, -.1, .1]\n    all_chains = []\n    for chain_i in range(num_chains):\n        chain_str = []\n        for p in parameters:\n            if chain_i < 3:\n                val = first_three_vals[chain_i]\n            else:\n                val = .1 * rand.NextDouble()\n                if (rand.NextDouble() > .5):\n                    val = -1 * val\n            chain_str.append(build_brugs_list_from_vector(p, [\"%15.10f\" % val for i in range(num_vals_needed)]))\n        all_chains.append(chain_str)\n    return all_chains\n\n\ndef build_results_list_for_cont_bayes(results_dir, N, num_covs, nphi, cr, is_two_group):\n    results = arraylist()\n    \n    overall_effects = System.Array.CreateInstance(float, nphi, 6)\n    vars_for_overall_table = None\n    if is_two_group:\n        vars_for_overall_table = [\"muc\", \"delta\"]\n    else:\n        vars_for_overall_table = [\"Ysum\"]\n    \n    for c in range(num_covs):\n        vars_for_overall_table.append(\"cov%s\" % c)  \n    \n    if cr > 0:\n        vars_for_overall_table.append(\"beta0\")\n    \n    add_tau = [\"tau2\"]\n    if is_two_group:\n        add_tau.insert(0, \"tauc2\")\n    vars_for_overall_table.extend(add_tau)\n    \n    for i in range(len(vars_for_overall_table)):\n        insert_arr_into_mat(overall_effects, i, ma_format_arr(get_first_line_arr(results_dir + \"//%s//buffer.txt\" % vars_for_overall_table[i])))\n    results.Add(overall_effects)\n\n    # build the treatment effects table -- this is delta.star\n    treat_matrix = System.Array.CreateInstance(float, N, 6)\n    treat_rows = None\n    if is_two_group:\n        treat_rows = build_arrs_from_file(results_dir + \"//delta.star//buffer.txt\")\n    else:\n        treat_rows = build_arrs_from_file(results_dir + \"//theta//buffer.txt\")\n    for i in range(len(treat_rows)):\n        
insert_arr_into_mat(treat_matrix, i, treat_rows[i])\n results.Add(treat_matrix)\n \n # now build the posterior control group table -- this is 'pc'\n if is_two_group:\n pc_matrix = System.Array.CreateInstance(float, N, 6)\n pc_rows = build_arrs_from_file(results_dir + \"//theta.c//buffer.txt\")\n for i in range(len(pc_rows)):\n insert_arr_into_mat(pc_matrix, i, pc_rows[i])\n results.Add(pc_matrix)\n\n return results\n\n \ndef build_overall_table(results_dir, nphi, num_covs, cr, is_two_group):\n # overall effects matrix\n overall_effects = System.Array.CreateInstance(float, nphi, 6)\n vars_for_overall_table = None\n \n if is_two_group:\n vars_for_overall_table = [\"cr.bar\", \"delta.exp\"]\n if cr > 0:\n vars_for_overall_table.append(\"beta\")\n \n if cr == 2:\n vars_for_overall_table.append(\"beta1\")\n else:\n vars_for_overall_table = [\"psum\", \"mu.bar\"]\n\n # now add the covariates\n for c in range(num_covs):\n vars_for_overall_table.append(\"cov%s\" % c) \n \n add_tau = [\"tau2\"]\n if is_two_group:\n add_tau.insert(0, \"tauc2\")\n vars_for_overall_table.extend(add_tau)\n \n for i in range(len(vars_for_overall_table)):\n insert_arr_into_mat(overall_effects, i, ma_format_arr(get_first_line_arr(results_dir + \"//%s//buffer.txt\" % vars_for_overall_table[i])))\n\n return overall_effects\n \n \ndef build_results_list_for_binary_bayes(results_dir, N, num_covs, nphi, cr, is_two_group):\n '''\n Builds and returns a results ArrayList from the BRUGs output (the top level is assumed to be the results_dir parameter).\n This list constructed so as to be ready to be passed off to the ReportGenerator on the C# side. \n '''\n results = arraylist()\n results.Add(build_overall_table(results_dir, nphi, num_covs, cr, is_two_group))\n \n # now build the posterior control group table -- this is 'pc'\n if is_two_group:\n pc_matrix = System.Array.CreateInstance(float, N, 6)\n pc_rows = build_arrs_from_file(results_dir + \"//pc//buffer.txt\")\n for i in range(len(pc_rows)):\n insert_arr_into_mat(pc_matrix, i, pc_rows[i])\n results.Add(pc_matrix)\n \n # finally, build the treatment effects table -- this is d.exp\n treat_matrix = System.Array.CreateInstance(float, N, 6)\n treat_rows = None\n mstr = \"or\"\n if is_two_group and mstr != \"rd\":\n treat_rows = build_arrs_from_file(results_dir + \"//d.exp//buffer.txt\")\n else:\n treat_rows = build_arrs_from_file(results_dir + \"//p//buffer.txt\")\n for i in range(len(treat_rows)):\n insert_arr_into_mat(treat_matrix, i, treat_rows[i])\n results.Add(treat_matrix)\n return results\n \ndef invlogit(x):\n return math.exp(x) / (1 + math.exp(x))\n \ndef build_diagnostic_bayes_outcome_table(results_dir, outcome, N, cr, num_covs, two_group):\n # intercept + covariates + tau2\n total_rows = N + 2 + num_covs \n \n vars_for_overall_table = None\n if two_group:\n vars_for_overall_table = [\"d.exp\"]\n else:\n vars_for_overall_table = [\"mu.bar\"]\n \n if cr == \"linear\":\n total_rows += 1\n vars_for_overall_table.append(\"beta\")\n\n vars_for_overall_table.append(\"tau2\")\n pstr = \"p\"\n if two_group:\n pstr = \"d.exp\"\n vars_for_overall_table.append(\"tau2\")\n total_rows += 1\n \n outcome_table = System.Array.CreateInstance(float, total_rows, 6)\n \n for cov in range(num_covs):\n vars_for_overall_table.append(\"cov%s\" % cov)\n \n\n # first insert the study data\n file = open(results_dir + outcome + \"//%s//buffer.txt\" % pstr, 'r')\n lines = file.readlines()[1:] # drop the headers\n file.close()\n \n for row_index in range(len(lines)):\n row = lines[row_index]\n 
insert_arr_into_mat(outcome_table, row_index, ma_format_arr(parse_numbers_from_line(row)))\n\n # add the 'overall' variables\n for i in range(len(vars_for_overall_table)):\n insert_arr_into_mat(outcome_table, N+i, ma_format_arr(get_first_line_arr(results_dir + outcome + \"//%s//buffer.txt\" % vars_for_overall_table[i])))\n \n return outcome_table\n\n \ndef build_results_list_for_diagnostic_bayes(results_dir, metrics, N, num_covs, nphis, cr):\n all_results = arraylist()\n results = arraylist()\n forest_plot_stuff = arraylist()\n outcome_index = 0\n for outcome in metrics:\n is_two_group = False\n if outcome in [\"dor\", \"lrp\", \"lrn\"]:\n is_two_group = True\n cur_outcome_table = build_diagnostic_bayes_outcome_table(results_dir, outcome, N, cr, num_covs, is_two_group)\n results.Add(cur_outcome_table)\n \n #\n # now deal with forest plot stuff\n #\n # this will include: [lowers, means, uppers, overalls]\n stuff_for_this_forest_plot = arraylist() \n # here we build double[] arrays to hand off for the forest plot. we need lowers, means and uppers, as well\n # as overalls, for each metric. first we add results for each study.\n lowers, means, uppers = System.Array.CreateInstance(float, N), System.Array.CreateInstance(float, N), System.Array.CreateInstance(float, N)\n lower_index, mean_index, upper_index = 0, 2, 4\n for study_i in range(N):\n lowers[study_i] = cur_outcome_table[study_i, lower_index]\n means[study_i] = cur_outcome_table[study_i, mean_index]\n uppers[study_i] = cur_outcome_table[study_i, upper_index]\n \n \n for double_arr in [lowers, means, uppers]:\n stuff_for_this_forest_plot.Add(double_arr)\n \n # now add the overall estimate\n overall = System.Array.CreateInstance(float, 3)\n # the summary statistic is psum if 'one group' model; delta.exp otherwise\n overall_var = \"psum\"\n if is_two_group:\n overall_var = \"delta.exp\"\n #overall_var = \"delta\"\n overall_arr = ma_format_arr(get_first_line_arr(results_dir + outcome + \"//%s//buffer.txt\" % overall_var))\n\n overall[0] = overall_arr[2]\n overall[1] = overall_arr[0]\n overall[2] = overall_arr[4]\n stuff_for_this_forest_plot.Add(overall)\n forest_plot_stuff.Add(stuff_for_this_forest_plot)\n outcome_index += 1\n all_results.Add(results)\n all_results.Add(forest_plot_stuff)\n return all_results\n\n\n\ndef generate_model_file_cont(out_path, cr, num_covs, is_two_group, generic):\n out_str = None\n # build string for covariate regression\n cov_str = \"\"\n if num_covs:\n cov_str = \" + \" + build_reg_str_for_covs(num_covs)\n \n if is_two_group:\n out_str = cont_bayes_two_group_linear_cr_str\n if cr == \"none\":\n out_str = out_str.replace(\"beta0 ~ dnorm(0.0,precnorm)\", \"\")\n if num_covs:\n out_str = out_str.replace(\"mu[i] <- delta + beta0 *(theta.c[i] - muc)\", \"mu[i] <- delta\" + cov_str)\n out_str += build_assignments_for_covs(num_covs)\n else:\n # no control rate, no covariates. 
\n out_str = out_str.replace(\"delta.star[i] ~ dnorm(mu[i], inv.tau2 )\", \"delta.star[i] ~ dnorm(delta, inv.tau2 )\")\n out_str = out_str.replace(\"mu[i] <- delta + beta0 *(theta.c[i] - muc)\", \"mu[i] <- delta\")\n else:\n if num_covs:\n out_str = out_str.replace(\"mu[i] <- delta + beta0 *(theta.c[i] - muc)\", \"mu[i] <- delta + beta0 *(theta.c[i] - muc)\" + cov_str)\n out_str += build_assignments_for_covs(num_covs)\n \n else:\n # one group\n out_str = cont_bayes_str_one_group\n if generic:\n out_str = cont_bayes_str_generic\n if num_covs:\n out_str = out_str.replace(\"mu[i] <- Ysum \", \"mu[i] <- Ysum \" + cov_str)\n out_str += build_assignments_for_covs(num_covs)\n \n out_str += \"\\n}\"\n f = open(out_path, 'w')\n f.write(out_str)\n f.close()\n \n \n\ndef generate_data_file_cont(out_path, num_studies, yt_vec, yc_vec, sdt_vec, sdc_vec, nt_vec, nc_vec, priors_on_q, priors_on_lam, covariates, precnorm=.000001):\n '''\n out_path -- data file will be written here\n num_studies -- number of studies total (equal to len(*_vec), or, N)\n yt_vec -- vector of means in treatment groups over all N studies\n yc_vec -- ditto, for control\n sdt_vec -- vector of standard deviations in treatment groups over all N studies\n sdc_vec -- ditto, for control\n nt_vec -- number of subjects in treated groups over all N studies\n nc_vec -- ditto, for control\n covs -- matrix of covariates\n '''\n precnorm = \"%15.10f\" % precnorm\n cont_data = []\n f = file(out_path, 'w')\n f.write(\"list(\")\n yt = build_brugs_list_from_vector(\"y.t\", yt_vec)\n yc = build_brugs_list_from_vector(\"y.c\", yc_vec)\n sdt = build_brugs_list_from_vector(\"sd.t\", sdt_vec)\n sdc = build_brugs_list_from_vector(\"sd.c\", sdc_vec)\n nt = build_brugs_list_from_vector(\"n.t\", nt_vec)\n nc = build_brugs_list_from_vector(\"n.c\", nc_vec)\n \n # here we deal with covariates\n all_covs = []\n # the covariates (or, xi) double[,] matrix has a 1s column at index 0. we're not interested in this.\n for cov_index in range(covariates.GetLength(1) - 1):\n all_covs.append(build_brugs_list_from_vector(\"x%s\"%cov_index, get_col(covariates, cov_index+1)))\n vecs_for_brugs =[yt, yc, sdt, sdc, nt, nc]\n vecs_for_brugs.extend(all_covs)\n \n f.write(\", \".join(vecs_for_brugs))\n f.write(\", precnorm=%s, gamma.a=%s, gamma.b=%s, gamma.c.a=%s, gamma.c.b=%s, N=%s)\" % \n (precnorm, priors_on_q[0], priors_on_lam[0], priors_on_q[1], priors_on_lam[1], num_studies))\n f.close()\n \n\ndef generate_data_file_cont_generic(out_path, num_studies, yt_vec, yt_var_vec, prior_on_q, prior_on_lam, covariates, precnorm=.000001):\n '''\n out_path -- data file will be written here\n num_studies -- number of studies total (equal to len(*_vec), or, N)\n yt_vec -- vector of means in treatment groups over all N studies\n\n covs -- matrix of covariates\n '''\t\n precnorm = \"%15.10f\" % precnorm\n cont_data = []\n f = file(out_path, 'w')\n f.write(\"list(\")\n yt = build_brugs_list_from_vector(\"y.t\", yt_vec)\n yt_var= build_brugs_list_from_vector(\"var.y\", yt_var_vec)\n \n # here we deal with covariates\n all_covs = []\n # the covariates (or, xi) double[,] matrix has a 1s column at index 0. 
we're not interested in this.\n for cov_index in range(covariates.GetLength(1) - 1):\n all_covs.append(build_brugs_list_from_vector(\"x%s\"%cov_index, get_col(covariates, cov_index+1)))\n vecs_for_brugs =[yt, yt_var]\n vecs_for_brugs.extend(all_covs)\n \n f.write(\", \".join(vecs_for_brugs))\n #f.write(\", precnorm=%s, gamma.a=%s, gamma.b=%s, N=%s)\" % \n #(precnorm, prior_on_q, prior_on_lam, num_studies))\n f.write(\", precnorm=%s, N=%s)\" % (precnorm, num_studies))\n f.close()\n \n \ndef generate_data_file_cont_one_group(out_path, num_studies, yt_vec, sdt_vec, nt_vec, prior_on_q, prior_on_lam, covariates, precnorm=.000001):\n '''\n out_path -- data file will be written here\n num_studies -- number of studies total (equal to len(*_vec), or, N)\n yt_vec -- vector of means in treatment groups over all N studies\n sdt_vec -- vector of standard deviations in treatment groups over all N studies\n nt_vec -- number of subjects in treated groups over all N studies\n covs -- matrix of covariates\n '''\t\n precnorm = \"%15.10f\" % precnorm\n cont_data = []\n f = file(out_path, 'w')\n f.write(\"list(\")\n yt = build_brugs_list_from_vector(\"y.t\", yt_vec)\n sdt = build_brugs_list_from_vector(\"sd.t\", sdt_vec)\n nt = build_brugs_list_from_vector(\"n.t\", nt_vec)\n \n # here we deal with covariates\n all_covs = []\n # the covariates (or, xi) double[,] matrix has a 1s column at index 0. we're not interested in this.\n for cov_index in range(covariates.GetLength(1) - 1):\n all_covs.append(build_brugs_list_from_vector(\"x%s\"%cov_index, get_col(covariates, cov_index+1)))\n vecs_for_brugs =[yt, sdt, nt]\n vecs_for_brugs.extend(all_covs)\n \n f.write(\", \".join(vecs_for_brugs))\n f.write(\", precnorm=%s, gamma.a=%s, gamma.b=%s, N=%s)\" % \n (precnorm, prior_on_q, prior_on_lam, num_studies))\n f.close()\n \n \n\ndef get_col(d, col_index):\n return [d[i, col_index] for i in range(d.GetLength(0))]\n \n \ndef bugs_cmd(s, brugs_wrapper_path):\n clr.AddReferenceToFileAndPath(brugs_wrapper_path)\n from BRUGSWrapper import *\n b = BRUGS()\n return b.ExecuteCmdBRUGS(s)\n \n \ndef build_arrs_from_file(f_path):\n file = open(f_path, 'r')\n lines = file.readlines()[1:] # drop the headers\n file.close()\n \n pc_rows = []\n for row in lines:\n pc_rows.append(ma_format_arr(parse_numbers_from_line(row)))\n \n return pc_rows\n \n\ndef insert_arr_into_mat(mat, row, arr):\n for col in range(len(arr)):\n mat[row, col] = arr[col]\n \n \ndef ma_format_arr(arr):\n # mean, sd, 2.5%, median, 97.5%, mc_error\n ma_arr = arr[:2]\n ma_arr.extend(arr[3:6])\n ma_arr.append(arr[2])\n return ma_arr\n \ndef get_first_line_arr(f_path):\n file = open(f_path, 'r')\n l = parse_numbers_from_line(file.readlines()[1])\n file.close()\n return l\n \n \ndef parse_numbers_from_line(line):\n nums = [x for x in line.split(\" \") if x!= ''][1:] # get rid of variable name\n return [eval(num) for num in nums]\n \n\ndef build_brugs_list_from_vector(name, v):\n return name + \"=c(\" + \",\".join([str(x) for x in v])+ \")\"" }, { "alpha_fraction": 0.5613813996315002, "alphanum_fraction": 0.5716730356216431, "avg_line_length": 40.54357147216797, "blob_id": "25dbf8e138d8b4220b29edd65b022e6f7911f523", "content_id": "e057e5b2dd671119b5c84e9f80f536e58a4462d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 80065, "license_type": "no_license", "max_line_length": 140, "num_lines": 1882, "path": "/src/R/openmetar/R/plotting.r", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": 
"####################################\r\n# #\r\n# OpenMeta[Analyst] #\r\n# ---- #\r\n# plotting.r # \r\n# #\r\n# Flexible forest plotting. # \r\n# (And more?) #\r\n# #\r\n# This code due mostly to Issa #\r\n# Dahabreh and Paul Trow # \r\n####################################\r\n\r\n# largely a generalization based on an example by\r\n# Murrell P., \"R graphics\", Chapman & Hall\r\nlibrary(\"grid\")\r\n\r\n#################################################################\r\n# functions for creating plot data to pass to plot functions #\r\n#################################################################\r\n\r\ncreate.plot.data.generic <- function(om.data, params, res, selected.cov=NULL){\r\n # Creates a data structure that can be passed to forest.plot\r\n # res is the output of a call to the Metafor function rma\r\n \r\n scale.str <- get.scale(params)\r\n transform.name <- get.transform.name(om.data)\r\n plot.options <- set.plot.options(params)\r\n # Set n, the number of studies, for PFT metric.\r\n if (params$measure==\"PFT\" && length(om.data@g1O1) > 1 && length(om.data@g1O2)) {\r\n n <- om.data@g1O1 + om.data@g1O2 # Number of subjects\r\n }\r\n\telse {\r\n\t\tn <- NULL # not needed except for pft\r\n\t}\r\n \r\n if (params$fp_plot_lb == \"[default]\") {\r\n plot.options$plot.lb <- params$fp_plot_lb\r\n } else {\r\n plot.lb <- eval(parse(text=paste(\"c(\", params$fp_plot_lb, \")\", sep=\"\")))\r\n plot.options$plot.lb <- eval(call(transform.name, params$measure))$calc.scale(plot.lb, n)\r\n } \r\n \r\n if (params$fp_plot_ub == \"[default]\") {\r\n plot.options$plot.ub <- params$fp_plot_ub\r\n } else {\r\n plot.ub <- eval(parse(text=paste(\"c(\", params$fp_plot_ub, \")\", sep=\"\")))\r\n if (scale.str == \"logit\") {\r\n plot.ub <- min(1, plot.ub)\r\n } \r\n plot.options$plot.ub <- eval(call(transform.name, params$measure))$calc.scale(plot.ub, n)\r\n } \r\n \r\n digits.str <- paste(\"%.\", params$digits, \"f\", sep=\"\")\r\n # heterogeity data\r\n tau2 <- sprintf(digits.str, res$tau2)\r\n degf <- res$k - 1\r\n QLabel = paste(\"Q(df=\", degf, \")\", sep=\"\")\r\n if (!is.null(res$QE)) {\r\n QE <- sprintf(digits.str, res$QE)\r\n } else {\r\n QE <- \"NA\"\r\n }\r\n if (!is.null(res$I2)) {\r\n I2 <- paste(round(res$I2, digits = 2), \"%\")\r\n } else {\r\n I2 <- \"NA\"\r\n }\r\n if (!is.null(res$QEp)) {\r\n if (res$QEp < 10^(-params$digits)) {\r\n PLabel <- \"P\" \r\n } else {\r\n PLabel <- \"P=\"\r\n }\r\n QEp <- round.display(res$QEp, params$digits)\r\n } else {\r\n PLabel <- \"\"\r\n QEp <- \"NA\"\r\n } \r\n \r\n overall <- paste(\"Overall (I^2=\", I2, \" , \", PLabel, QEp, \")\", sep=\"\")\r\n # append years to study names unless year equals 0 (0 is passed to R when year is empty).\r\n study.names <- [email protected]\r\n years <- om.data@years\r\n study.names[years != 0] <- paste(study.names[years != 0], years[years != 0], sep=\" \")\r\n plot.data <- list(label = c(paste(params$fp_col1_str, sep = \"\"), study.names, overall),\r\n types = c(3, rep(0, length([email protected])), 2),\r\n scale = scale.str,\r\n options = plot.options) \r\n y.overall <- res$b[1]\r\n lb.overall <- res$ci.lb[1]\r\n ub.overall <- res$ci.ub[1]\r\n y <- om.data@y\r\n study.ci.bounds <- calc.ci.bounds(om.data, params, ni=n)\r\n lb <- study.ci.bounds$lb\r\n ub <- study.ci.bounds$ub\r\n \r\n #y.disp <- eval(call(transform.name, params$measure))$display.scale(y, n)\r\n #lb.disp <- eval(call(transform.name, params$measure))$display.scale(lb, n)\r\n #ub.disp <- eval(call(transform.name, params$measure))$display.scale(ub, 
n)\r\n \r\n #y.overall.disp <- eval(call(transform.name, params$measure))$display.scale(y.overall, n)\r\n #lb.overall.disp <- eval(call(transform.name, params$measure))$display.scale(lb.overall, n)\r\n #ub.overall.disp <- eval(call(transform.name, params$measure))$display.scale(ub.overall, n)\r\n\t\r\n\ty.disp <- eval(call(transform.name, params$measure))$display.scale(y, ni=n)\r\n\tlb.disp <- eval(call(transform.name, params$measure))$display.scale(lb, ni=n)\r\n\tub.disp <- eval(call(transform.name, params$measure))$display.scale(ub, ni=n)\r\n\t\r\n\ty.overall.disp <- eval(call(transform.name, params$measure))$display.scale(y.overall, ni=n)\r\n\tlb.overall.disp <- eval(call(transform.name, params$measure))$display.scale(lb.overall, ni=n)\r\n\tub.overall.disp <- eval(call(transform.name, params$measure))$display.scale(ub.overall, ni=n)\r\n \r\n y <- c(y, y.overall)\r\n lb <- c(lb, lb.overall)\r\n ub <- c(ub, ub.overall)\r\n \r\n y.disp <- c(y.disp, y.overall.disp)\r\n lb.disp <- c(lb.disp, lb.overall.disp)\r\n ub.disp <- c(ub.disp, ub.overall.disp)\r\n \r\n effects.disp <- list(y.disp=y.disp, lb.disp=lb.disp, ub.disp=ub.disp)\r\n # these values will be displayed on the plot\r\n plot.data$effects.disp <- effects.disp\r\n \r\n # If metric is log scale, effect sizes and plot range are passed to forest.plot in\r\n # calculation (log) scale, in order to set tick marks in log scale.\r\n # Otherwise, effect sizes and plot range are passed in display (untransformed) scale.\r\n \r\n if (!metric.is.log.scale(params$measure)) {\r\n # if metric not log scale, pass data in display scale - no scaling on x-axis\r\n y <- y.disp\r\n lb <- lb.disp\r\n ub <- ub.disp\r\n }\r\n \r\n effects <- list(ES = y,\r\n LL = lb,\r\n UL = ub) \r\n plot.data$effects <- effects\r\n plot.range <- calc.plot.range(effects, plot.options)\r\n # Calculate a reasonable range for the x-values to display in plot.\r\n \r\n if (metric.is.log.scale(params$measure)) {\r\n # Plot range is in calc scale, so put back in display scale to update params.\r\n plot.range.disp.lower <- eval(call(transform.name, params$measure))$display.scale(plot.range[1])\r\n plot.range.disp.upper <- eval(call(transform.name, params$measure))$display.scale(plot.range[2])\r\n } else {\r\n plot.range.disp.lower <- plot.range[1]\r\n plot.range.disp.upper <- plot.range[2]\r\n }\r\n plot.data$plot.range <- plot.range\r\n changed.params <- plot.options$changed.params\r\n if (plot.options$plot.lb != plot.range.disp.lower) {\r\n changed.params$fp_plot_lb <- plot.range.disp.lower \r\n }\r\n if (plot.options$plot.ub != plot.range.disp.upper) {\r\n changed.params$fp_plot_ub <- plot.range.disp.upper \r\n }\r\n plot.data$changed.params <- changed.params\r\n \r\n if (!is.null(selected.cov)){\r\n cov.val.str <- paste(\"om.data@covariates$\", selected.cov, sep=\"\")\r\n cov.values <- eval(parse(text=cov.val.str))\r\n plot.data$covariate <- list(varname = selected.cov,\r\n values = cov.values)\r\n }\r\n plot.data\r\n}\r\n\r\ncreate.plot.data.binary <- function(binary.data, params, res, selected.cov = NULL){\r\n \r\n plot.data <- create.plot.data.generic(binary.data, params, res, selected.cov=selected.cov)\r\n # if we have raw data, add it to plot.data\r\n if (length(binary.data@g1O1) > 0) {\r\n \r\n plot.data$col3 <- list(nums = binary.data@g1O1, denoms = binary.data@g1O1 + binary.data@g1O2)\r\n }\r\n \r\n if (length(binary.data@g2O1) > 0) {\r\n plot.data$col4 <- list(nums = binary.data@g2O1, denoms = binary.data@g2O1 + binary.data@g2O2)\r\n }\r\n \r\n 
plot.data\r\n}\r\n\r\ncreate.plot.data.diagnostic <- function(diagnostic.data, params, res, selected.cov = NULL){\r\n\r\n plot.data <- create.plot.data.generic(diagnostic.data, params, res, selected.cov=selected.cov)\r\n plot.options <- plot.data$plot.options\r\n plot.options$show.y.axis <- FALSE\r\n changed.params <- plot.data$changed.params\r\n # don't show y axis in diagnostic forest plots\r\n # if we have raw data, add it to plot.data\r\n if (length(diagnostic.data@TP) > 0) {\r\n raw.data <- list(\"TP\"=diagnostic.data@TP, \"FN\"=diagnostic.data@FN, \"TN\"=diagnostic.data@TN, \"FP\"=diagnostic.data@FP)\r\n terms <- compute.diagnostic.terms(raw.data, params)\r\n plot.data$col3 <- list(nums=terms$numerator, denoms=terms$denominator)\r\n \r\n metric <- params$measure\r\n # create label for column 3 based on metric\r\n label <- switch(metric,\r\n # sensitivity\r\n Sens = \"TP/(TP + FN)\", \r\n # specificity\r\n Spec = \"TN/(FP + TN)\",\r\n # pos. predictive value\r\n PPV = \"TP/(TP + FP)\",\r\n #neg. predictive value\r\n NPV = \"TN/(TN + FN)\",\r\n # accuracy\r\n Acc = \"(TP + TN)/Tot\",\r\n # positive likelihood ratio\r\n PLR = \"(TP * Di-)/(FP * Di+)\", \r\n # negative likelihood ratio\r\n NLR = \"(FN * Di-)/(TN * Di+)\",\r\n # diagnostic odds ratio\r\n DOR = \"(TP * TN)/(FP * FN)\")\r\n \r\n plot.data$options$col3.str <- label\r\n changed.params$fp_col3_str <- label\r\n plot.data$changed.params <- changed.params\r\n }\r\n plot.data\r\n}\r\n\r\ncreate.plot.data.continuous <- function(cont.data, params, res, selected.cov = NULL){\r\n # Creates a data structure that can be passed to forest.plot\r\n # res is the output of a call to the Metafor function rma\r\n plot.data <- create.plot.data.generic(cont.data, params, res, selected.cov=selected.cov)\r\n plot.data\r\n}\r\n\r\ncreate.plot.data.overall <- function(om.data, params, res, res.overall){\r\n scale.str <- get.scale(params)\r\n # Set n, the number of studies, for PFT metric.\r\n if (params$measure==\"PFT\" && length(om.data@g1O1) > 1 && length(om.data@g1O2)) {\r\n n <- om.data@g1O1 + om.data@g1O2 # Number of subjects\r\n }\r\n\telse {\r\n\t n <- NULL\r\n\t}\r\n \r\n ## TO DO - don't really nead three transforms - the transform only depends on the measure.\r\n transform.name <- get.transform.name(om.data)\r\n plot.options <- set.plot.options(params)\r\n plot.options$show.col3 <- FALSE\r\n plot.options$show.col4 <- FALSE\r\n # currently not displaying raw data cols. 
for overall plots\r\n\r\n if (params$fp_plot_lb == \"[default]\") {\r\n plot.options$plot.lb <- params$fp_plot_lb\r\n } else {\r\n plot.lb <- eval(parse(text=paste(\"c(\", params$fp_plot_lb, \")\", sep=\"\")))\r\n plot.options$plot.lb <- eval(call(transform.name, params$measure))$calc.scale(plot.lb, n)\r\n }\r\n if (params$fp_plot_ub == \"[default]\") {\r\n plot.options$plot.ub <- params$fp_plot_ub\r\n } else {\r\n plot.ub <- eval(parse(text=paste(\"c(\", params$fp_plot_ub, \")\", sep=\"\")))\r\n plot.options$plot.ub <- eval(call(transform.name, params$measure))$calc.scale(plot.ub, n)\r\n } \r\n if (metric.is.log.scale(params$measure)) {\r\n plot.options$show.y.axis <- FALSE\r\n # don't show y-axis for diagnostic forest plots\r\n } else {\r\n plot.options$show.y.axis <- TRUE\r\n } \r\n\r\n plot.data <- list( scale = scale.str,\r\n options = plot.options)\r\n # unpack data\r\n y <- NULL\r\n lb <- NULL\r\n ub <- NULL\r\n \r\n for (count in 1:length(res)) {\r\n y <- c(y, res[[count]]$b)\r\n lb <- c(lb, res[[count]]$ci.lb)\r\n ub <- c(ub, res[[count]]$ci.ub)\r\n }\r\n \r\n y.disp <- eval(call(transform.name, params$measure))$display.scale(y, n)\r\n lb.disp <- eval(call(transform.name, params$measure))$display.scale(lb, n)\r\n ub.disp <- eval(call(transform.name, params$measure))$display.scale(ub, n) \r\n effects.disp <- list(y.disp=y.disp, lb.disp=lb.disp, ub.disp=ub.disp)\r\n plot.data$effects.disp <- effects.disp\r\n \r\n if (!metric.is.log.scale(params$measure)) {\r\n # if metric not log scale, pass data in display scale - no scaling on x-axis\r\n y <- y.disp\r\n lb <- lb.disp\r\n ub <- ub.disp\r\n }\r\n \r\n effects <- list(ES = y,\r\n LL = lb,\r\n UL = ub) \r\n plot.data$effects <- effects\r\n plot.range <- calc.plot.range(effects, plot.options)\r\n plot.data$plot.range <- plot.range\r\n # Put plot range in display scale to update params.\r\n plot.range.disp.lower <- eval(call(transform.name, params$measure))$display.scale(plot.range[1], n)\r\n plot.range.disp.upper <- eval(call(transform.name, params$measure))$display.scale(plot.range[2], n)\r\n changed.params <- plot.options$changed.params\r\n if (plot.options$plot.lb != plot.range.disp.lower) {\r\n changed.params$fp_plot_lb <- plot.range.disp.lower \r\n }\r\n if (plot.options$plot.ub != plot.range.disp.upper) {\r\n changed.params$fp_plot_ub <- plot.range.disp.upper \r\n }\r\n if (metric.is.log.scale(params$measure)) {\r\n plot.data$summary.est <- res.overall$b[1]\r\n # Pass in calc. scale if metric is log scale\r\n } else {\r\n plot.data$summary.est <- eval(call(transform.name, params$measure))$display.scale(res.overall$b[1], n)\r\n }\r\n plot.data$changed.params <- changed.params\r\n plot.data\r\n}\r\n\r\ncreate.plot.data.cum <- function(om.data, params, res) {\r\n # Wrapper for creating cumulative plot.data\r\n params$show_col1 <- 'FALSE'\r\n # don't show study names for right-hand plot\r\n res.overall <- res[[length(res)]]\r\n # Last entry of res contains overall summary\r\n plot.data <- create.plot.data.overall(om.data, params, res, res.overall)\r\n \r\n study.names <- c()\r\n study.names <- paste(\" \", [email protected][1], sep=\"\") \r\n for (count in 2:length([email protected])) {\r\n study.names <- c(study.names, paste(\"+ \",[email protected][count], sep=\"\"))\r\n }\r\n # duplicate last row of data to generate an empty row in the cumulative plot.\r\n # This data does not get plotted! 
Just aligns rows with standard plot.\r\n effects.disp.tmp <- plot.data$effects.disp\r\n y.disp.tmp <- effects.disp.tmp$y.disp\r\n lb.disp.tmp <- effects.disp.tmp$lb.disp\r\n ub.disp.tmp <- effects.disp.tmp$ub.disp\r\n last.index <- length(y.disp.tmp)\r\n y.disp.tmp <- c(y.disp.tmp, y.disp.tmp[last.index])\r\n lb.disp.tmp <- c(lb.disp.tmp, lb.disp.tmp[last.index])\r\n ub.disp.tmp <- c(ub.disp.tmp, ub.disp.tmp[last.index])\r\n effects.disp <- list(\"y.disp\"=y.disp.tmp, \"lb.disp\"=lb.disp.tmp, \"ub.disp\"=ub.disp.tmp)\r\n plot.data$effects.disp <- effects.disp\r\n \r\n effects.tmp <- plot.data$effects\r\n ES.tmp <- effects.tmp$ES\r\n LL.tmp <- effects.tmp$LL\r\n UL.tmp <- effects.tmp$UL\r\n last.index <- length(ES.tmp)\r\n ES.tmp <- c(ES.tmp, ES.tmp[last.index])\r\n LL.tmp <- c(LL.tmp, LL.tmp[last.index])\r\n UL.tmp <- c(UL.tmp, UL.tmp[last.index])\r\n effects <- list(\"ES\"=ES.tmp, \"LL\"=LL.tmp, \"UL\"=UL.tmp)\r\n plot.data$effects<- effects\r\n plot.data$types <- c(3, rep(0, length(study.names)), 4)\r\n # type 4 does not get plotted! Generates empty row in plot.\r\n study.names <- c(study.names, \"\")\r\n # extra blank name to align rows with standard plot\r\n plot.data$label <- c(as.character(params$fp_col1_str), study.names) \r\n plot.data\r\n}\r\n\r\ncreate.plot.data.loo <- function(om.data, params, res) {\r\n # wrapper for creating leave-one-out plot.data\r\n res.overall <- res[[1]]\r\n # First entry of res contains overall summary\r\n study.names <- c(\"Overall\", paste(\"- \", [email protected], sep=\"\"))\r\n plot.data <- create.plot.data.overall(om.data, params, res, res.overall)\r\n plot.data$label <- c(as.character(params$fp_col1_str), study.names)\r\n plot.data$types <- c(3, 5, rep(0, length([email protected])))\r\n plot.data\r\n}\r\n\r\n# create subgroup analysis plot data\r\ncreate.subgroup.plot.data.generic <- function(subgroup.data, params, data.type, selected.cov=NULL) {\r\n \r\n grouped.data <- subgroup.data$grouped.data\r\n res <- subgroup.data$results\r\n subgroup.list <- subgroup.data$subgroup.list\r\n scale.str <- get.scale(params)\r\n # Set n, the number of studies, for PFT metric.\r\n if (params$measure==\"PFT\" && length(om.data@g1O1) > 1 && length(om.data@g1O2)) {\r\n n <- om.data@g1O1 + om.data@g1O2 # Number of subjects\r\n }\r\n\telse {\r\n\t\tn <- NULL\r\n\t}\r\n \r\n ## TO DO - don't really nead three transforms - the transform only depends on the measure.\r\n if (data.type == \"continuous\") {\r\n transform.name <- \"continuous.transform.f\"\r\n } else if (data.type == \"diagnostic\") {\r\n transform.name <- \"diagnostic.transform.f\"\r\n } else if (data.type == \"binary\") {\r\n transform.name <- \"binary.transform.f\"\r\n }\r\n cur.res <- NULL\r\n y <- NULL\r\n lb <- NULL\r\n ub <- NULL\r\n label.col <- NULL\r\n types <- NULL\r\n alpha <- 1.0-(params$conf.level/100.0)\r\n mult <- abs(qnorm(alpha/2.0))\r\n digits.str <- paste(\"%.\", params$digits, \"f\", sep=\"\")\r\n \r\n for (i in 1:length(subgroup.list)){\r\n # create plot data for each subgroup and concatenate results\r\n cur.res <- res[[i]]\r\n params.tmp <- params\r\n cur.y.overall <- cur.res$b[1]\r\n cur.lb.overall <- cur.res$ci.lb[1]\r\n cur.ub.overall <- cur.res$ci.ub[1]\r\n cur.y <- grouped.data[[i]]@y\r\n cur.lb <- cur.y - mult*grouped.data[[i]]@SE\r\n cur.ub <- cur.y + mult*grouped.data[[i]]@SE\r\n y <- c(y, cur.y, cur.y.overall)\r\n lb <- c(lb, cur.lb, cur.lb.overall)\r\n ub <- c(ub, cur.ub, cur.ub.overall)\r\n \r\n # heterogeneity data\r\n degf <- cur.res$k - 1\r\n if (!is.null(cur.res$QE)) 
{\r\n QE <- sprintf(digits.str, cur.res$QE)\r\n } else {\r\n QE <- \"NA\"\r\n }\r\n if (!is.null(cur.res$I2)) {\r\n I2 <- paste(round(cur.res$I2, digits = 2), \"%\")\r\n } else {\r\n I2 <- \"NA\"\r\n }\r\n if (!is.null(cur.res$QEp)) {\r\n QEp <- sprintf(digits.str, cur.res$QEp)\r\n } else {\r\n QEp <- \"NA\"\r\n } \r\n \r\n overall <- paste(\" (I^2=\", I2, \" , P=\", QEp, \")\", sep=\"\")\r\n types <- c(types, rep(0, length(grouped.data[[i]]@study.names)), 1)\r\n label.col <-c(label.col, grouped.data[[i]]@study.names, paste(\"Subgroup \", subgroup.list[i], overall, sep=\"\"))\r\n } \r\n cur.res <- res[[length(subgroup.list) + 1]]\r\n cur.y.overall <- cur.res$b[1]\r\n cur.lb.overall <- cur.res$ci.lb[1]\r\n cur.ub.overall <- cur.res$ci.ub[1]\r\n y <- c(y, cur.y.overall)\r\n lb <- c(lb, cur.lb.overall)\r\n ub <- c(ub, cur.ub.overall)\r\n types <- c(3,types, 2)\r\n # heterogeneity data\r\n degf <- cur.res$k - 1\r\n if (!is.null(cur.res$QE)) {\r\n QE <- sprintf(digits.str, cur.res$QE)\r\n } else {\r\n QE <- \"NA\"\r\n }\r\n if (!is.null(cur.res$I2)) {\r\n I2 <- paste(round(cur.res$I2, digits = 2), \"%\")\r\n } else {\r\n I2 <- \"NA\"\r\n }\r\n if (!is.null(cur.res$QEp)) {\r\n QEp <- sprintf(digits.str, cur.res$QEp)\r\n } else {\r\n QEp <- \"NA\"\r\n } \r\n overall <- paste(\" (I^2=\", I2, \" , P=\", QEp, \")\", sep=\"\")\r\n label.col <- c(as.character(params$fp_col1_str), label.col, paste(\"Overall\", overall, sep=\"\"))\r\n plot.options <- set.plot.options(params)\r\n if (params$fp_plot_lb == \"[default]\") {\r\n plot.options$plot.lb <- params$fp_plot_lb\r\n } else {\r\n plot.lb <- eval(parse(text=paste(\"c(\", params$fp_plot_lb, \")\", sep=\"\")))\r\n plot.options$plot.lb <- eval(call(transform.name, params$measure))$calc.scale(plot.lb, n)\r\n }\r\n if (params$fp_plot_ub == \"[default]\") {\r\n plot.options$plot.ub <- params$fp_plot_ub\r\n } else {\r\n plot.ub <- eval(parse(text=paste(\"c(\", params$fp_plot_ub, \")\", sep=\"\")))\r\n plot.options$plot.ub <- eval(call(transform.name, params$measure))$calc.scale(plot.ub, n)\r\n }\r\n\r\n # should we show summary line for subgroup plots??\r\n plot.data <- list(label = label.col,\r\n types=types,\r\n scale = scale.str,\r\n options = plot.options) \r\n y.disp <- eval(call(transform.name, params$measure))$display.scale(y, n)\r\n lb.disp <- eval(call(transform.name, params$measure))$display.scale(lb, n)\r\n ub.disp <- eval(call(transform.name, params$measure))$display.scale(ub, n)\r\n \r\n # these values will be displayed on the plot\r\n effects.disp <- list(y.disp=y.disp, lb.disp=lb.disp, ub.disp=ub.disp)\r\n plot.data$effects.disp <- effects.disp\r\n \r\n if (!metric.is.log.scale(params$measure)) {\r\n # if metric not log scale, pass data in display scale - no scaling on x-axis\r\n y <- y.disp\r\n lb <- lb.disp\r\n ub <- ub.disp\r\n }\r\n \r\n effects <- list(ES = y,\r\n LL = lb,\r\n UL = ub)\r\n \r\n plot.data$effects <- effects\r\n plot.range <- calc.plot.range(effects, plot.options)\r\n plot.data$plot.range <- plot.range\r\n # Put plot range in display scale to update params.\r\n plot.range.disp.lower <- eval(call(transform.name, params$measure))$display.scale(plot.range[1], n)\r\n plot.range.disp.upper <- eval(call(transform.name, params$measure))$display.scale(plot.range[2], n)\r\n changed.params <- plot.options$changed.params\r\n if (plot.options$plot.lb != plot.range.disp.lower) {\r\n changed.params$fp_plot_lb <- plot.range.disp.lower \r\n }\r\n if (plot.options$plot.ub != plot.range.disp.upper) {\r\n changed.params$fp_plot_ub <- 
plot.range.disp.upper \r\n }\r\n plot.data$changed.params <- changed.params\r\n\r\n if (!is.null(selected.cov)){\r\n cov.val.str <- paste(\"om.data@covariates$\", selected.cov, sep=\"\")\r\n cov.values <- eval(parse(text=cov.val.str))\r\n plot.data$covariate <- list(varname = selected.cov,\r\n values = cov.values)\r\n }\r\n plot.data\r\n}\r\n\r\ncreate.subgroup.plot.data.binary <- function(subgroup.data, params) {\r\n grouped.data <- subgroup.data$grouped.data\r\n plot.data <- create.subgroup.plot.data.generic(subgroup.data, params, data.type=\"binary\") \r\n\r\n # if we have raw data, add it to plot.data\r\n if (length(grouped.data[[1]]@g1O1) > 0) {\r\n \r\n plot.data$col3 <- list(nums = subgroup.data$col3.nums, denoms = subgroup.data$col3.denoms)\r\n }\r\n \r\n if (length(grouped.data[[1]]@g2O1) > 0) {\r\n plot.data$col4 <- list(nums = subgroup.data$col4.nums, denoms = subgroup.data$col4.denoms)\r\n }\r\n plot.data\r\n}\r\n\r\ncreate.subgroup.plot.data.diagnostic <- function(subgroup.data, params) {\r\n grouped.data <- subgroup.data$grouped.data\r\n plot.data <- create.subgroup.plot.data.generic(subgroup.data, params, data.type=\"diagnostic\") \r\n if (length(grouped.data[[1]]@TP) > 0) {\r\n plot.data$col3 <- list(nums = subgroup.data$col3.nums, denoms = subgroup.data$col3.denoms)\r\n \r\n metric <- params$measure\r\n # create label for column 3 based on metric\r\n label <- switch(metric,\r\n # sensitivity\r\n Sens = \"TP / (TP + FN)\", \r\n # specificity\r\n Spec = \"TN / (FP + TN)\",\r\n # pos. predictive value\r\n PPV = \"TP / (TP + FP)\",\r\n #neg. predictive value\r\n NPV = \"TN / (TN + FN)\",\r\n # accuracy\r\n Acc = \"(TP + TN) / Tot\",\r\n # positive likelihood ratio\r\n PLR = \"(TP * Di-) / (FP * Di+)\", \r\n # negative likelihood ratio\r\n NLR = \"(FN * Di-) / (TN * Di+)\",\r\n # diagnostic odds ratio\r\n DOR = \"(TP * TN) / (FP * FN\")\r\n #data.col <- format.raw.data.col(nums = terms$numerator, denoms = terms$denominator, label = label) \r\n #plot.data$additional.col.data$cases = data.col\r\n plot.data$options$col3.str <- label\r\n }\r\n plot.data\r\n}\r\n\r\ncreate.subgroup.plot.data.cont <- function(subgroup.data, params) {\r\n grouped.data <- subgroup.data$grouped.data\r\n plot.data <- create.subgroup.plot.data.generic(subgroup.data, params, data.type=\"continuous\") \r\n}\r\n\r\n# create regression plot data\r\ncreate.plot.data.reg <- function(reg.data, params, fitted.line) {\r\n scale.str <- get.scale(params)\r\n cov.name <- reg.data@covariates[[1]]@cov.name\r\n cov.vals <- reg.data@covariates[[1]]@cov.vals\r\n plot.data <- list(\"fitted.line\" = fitted.line,\r\n types = c(rep(0, length([email protected]))),\r\n scale = scale.str,\r\n covariate = list(varname = cov.name, values = cov.vals))\r\n alpha <- 1.0-(params$conf.level/100.0)\r\n mult <- abs(qnorm(alpha/2.0))\r\n \r\n \r\n y <- reg.data@y\r\n se <- reg.data@SE\r\n effects <- list(ES = y,\r\n se = se)\r\n plot.data$effects <- effects\r\n\r\n ###\r\n # @TODO; these need to be set by the user,\r\n # will probably be placed on the params object\r\n plot.data$sym.size <- 1\r\n plot.data$lcol <- \"darkred\"\r\n plot.data$lweight <- 3\r\n plot.data$lpattern <- \"dotted\"\r\n plot.data$plotregion <- \"n\"\r\n plot.data$mcolor <- \"darkgreen\"\r\n plot.data$regline <- TRUE\r\n\r\n plot.data\r\n}\r\n\r\n\r\n\r\nset.plot.options <- function(params) {\r\n # set default plot options\r\n plot.options <- list()\r\n changed.params <- list()\r\n # xticks is a vector of tick marks for the x-axis\r\n if (params$fp_xticks[1] == 
'[default]') {\r\n plot.options$xticks <- NA\r\n } else if (is.vector(params$fp_xticks)) {\r\n # params was saved from a previous run and plot is being edited.\r\n plot.options$xticks <- params$fp_xticks\r\n } else {\r\n # params being passed in from GUI - convert to a vector.\r\n plot.options$xticks <- eval(parse(text=paste(\"c(\", params$fp_xticks, \")\", sep=\"\")))\r\n }\r\n if (params$fp_show_col1=='TRUE') {\r\n plot.options$show.study.col <- TRUE\r\n } else {\r\n plot.options$show.study.col <- FALSE\r\n }\r\n plot.options$col1.str <- as.character(params$fp_col1_str)\r\n \r\n if (params$fp_show_col2=='TRUE') {\r\n plot.options$show.col2 <- TRUE\r\n } else {\r\n plot.options$show.col2 <- FALSE\r\n }\r\n if (params$fp_col2_str == \"[default]\") {\r\n col2.str <- paste(\"Estimate (\", params$conf.level, \"% C.I.)\", sep=\"\")\r\n plot.options$col2.str <- col2.str\r\n changed.params$fp_col2_str <- col2.str\r\n } else {\r\n plot.options$col2.str <- as.character(params$fp_col2_str)\r\n }\r\n\r\n if (params$fp_show_col3=='TRUE') {\r\n plot.options$show.col3 <- TRUE\r\n } else {\r\n plot.options$show.col3 <- FALSE\r\n }\r\n if (!is.null(params$fp_col3_str)) {\r\n plot.options$col3.str <- as.character(params$fp_col3_str)\r\n }\r\n if ((params$fp_show_col4=='TRUE') && (!as.character(params$measure) %in% c(\"PR\", \"PLN\", \"PLO\", \"PAS\", \"PFT\"))) {\r\n # don't show col. 4 if metric is one-arm.\r\n plot.options$show.col4 <- TRUE\r\n } else {\r\n plot.options$show.col4 <- FALSE\r\n }\r\n if (!is.null(params$fp_col4_str)) {\r\n plot.options$col4.str <- as.character(params$fp_col4_str)\r\n }\r\n \r\n # xlabel is the label for the x-axis\r\n if (params$fp_xlabel == \"[default]\") {\r\n xlabel <- pretty.metric.name(as.character(params$measure))\r\n if (metric.is.log.scale(params$measure)) {\r\n xlabel <- paste(xlabel, \" (log scale)\", sep=\"\")\r\n }\r\n plot.options$xlabel <- xlabel\r\n changed.params$fp_xlabel <- xlabel\r\n } else {\r\n plot.options$xlabel <- as.character(params$fp_xlabel)\r\n }\r\n \r\n # fp.title is the title for forest plot\r\n # In future, this should be user option\r\n if (is.null(params$fp.title)) {\r\n plot.options$fp.title <- \"\"\r\n } else {\r\n plot.options$fp.title <- params$fp.title\r\n }\r\n \r\n # if show.summary.line is TRUE, a vertical dashed line is displayed at the\r\n # overall summary.\r\n if (params$fp_show_summary_line=='TRUE') { \r\n plot.options$show.summary.line <- TRUE\r\n } else {\r\n plot.options$show.summary.line <- FALSE\r\n }\r\n plot.options$show.y.axis <- TRUE\r\n \r\n plot.options$digits <- params$digits\r\n plot.options$changed.params <- changed.params\r\n plot.options\r\n} \r\n\r\ncalc.plot.range <- function(effects, plot.options) {\r\n # Calculate lower and upper bounds for x-values of plotted data\r\n # if user has not supplied them (or user's bounds don't include all effect sizes).\r\n effect.size.min <- min(effects$ES)\r\n # Smallest value for which we accept user's input for plot lower bound.\r\n # User's lower bound must be less than all effect sizes.\r\n effect.size.max <- max(effects$ES) \r\n # Largest user input for plot upper bound. 
All effect sizes must be less than this value.\r\n user.lb <- plot.options$plot.lb\r\n user.ub <- plot.options$plot.ub\r\n if (user.lb != \"[default]\") {\r\n # Check whether user's lb is OK\r\n if (user.lb > effect.size.min) {\r\n # not OK\r\n user.lb <- \"[default]\"\r\n }\r\n } \r\n if (user.ub != \"[default]\") {\r\n # Check whether user's lb is OK\r\n if (plot.options$plot.ub < effect.size.max) {\r\n # not OK\r\n user.ub <- \"[default]\"\r\n }\r\n }\r\n plot.range <- c()\r\n if (user.lb == \"[default]\" || user.ub == \"[default]\") {\r\n # If user has not supplied both lower and upper bounds (that meet the requirements), compute bounds.\r\n # This is a heuristic to determine a reasonable range for the displayed values - \r\n # confidence intervals that exceed this range are truncated and left or right arrows are displayed instead of the full CI.\r\n effect.size.width <- effect.size.max - effect.size.min\r\n \r\n effects.max <- max(effects$UL)\r\n effects.min <- min(effects$LL)\r\n arrow.factor <- 2\r\n # Confidence intervals extend at most arrow.factor times effect.size.width beyond (effect.size.min, effect.size.max)\r\n plot.ub <- min(effects.max, effect.size.max + arrow.factor * effect.size.width)\r\n plot.lb <- max(effects.min, effect.size.min - arrow.factor * effect.size.width)\r\n \r\n plot.range <- c(plot.lb, plot.ub)\r\n }\r\n if (user.lb != \"[default]\") {\r\n # If the user's lb input is OK, set lower bound of range equal it.\r\n plot.range[1] <- user.lb\r\n }\r\n if (user.ub != \"[default]\") {\r\n # If the user's ub input is OK, set upper bound of range equal it.\r\n plot.range[2] <- user.ub\r\n }\r\n plot.range\r\n}\r\n\r\npretty.metric.name <- function(metric) {\r\n # sub out the space in TX Mean\r\n metric <- gsub(\" \", \".\", metric)\r\n\r\n # labels for plot axes\r\n metric.name <- list(\r\n OR = \"Odds Ratio\",\r\n RD = \"Risk Difference\",\r\n MD = \"Mean Difference\",\r\n SMD = \"Standardized Mean Difference\",\r\n RR = \"Relative Risk\",\r\n AS = \"Arcsine Risk Difference\",\r\n PR = \"Proportion\",\r\n PLN = \"Log Proportion\", \r\n PLO = \"Logit Proportion\",\r\n PAS = \"Arcsine of Square Root Proportion\",\r\n PFT = \"Freeman-Tukey Double Arcsine Proportion\", \r\n PETO = \"Peto\",\r\n YUQ = \"Yule's Q\",\r\n YUY = \"Yules Y\",\r\n Sens = \"Sensitivity\", \r\n Spec = \"Specificity\",\r\n # pos. predictive value\r\n PPV = \"Positive Predictive Value\",\r\n #neg. predictive value\r\n NPV = \"Negative Predictive value\",\r\n # accuracy\r\n Acc = \"Accuracy\",\r\n # positive likelihood ratio\r\n PLR = \"Positive Likelihood Ratio\", \r\n # negative likelihood ratio\r\n NLR = \"Negative Likelihood Ratio\",\r\n # diagnostic odds ratio\r\n DOR = \"Diagnostic Odds Ratio\",\r\n # tx mean is already pretty.\r\n TXMean = \"TX Mean\",\r\n # Generic Effect\r\n GEN = \"Generic Effect\")[[metric]]\r\n\r\n metric.name\r\n}\r\n\r\n###################################\r\n# functions for creating plots #\r\n###################################\r\n\r\n#######################################\r\n# forest plot #\r\n#######################################\r\nforest.plot <- function(forest.data, outpath) {\r\n png(filename=paste(\"r_tmp\",\"INTER\",sep=\"/\")) # to fix windows popping out at you issue\r\n\t\r\n # calculates plot sizes and layout, and then calls draw.forest.plot.\r\n # forest.data is a list contains the following fields:\r\n #\r\n # - effects.disp - list with 3 fields:\r\n # - y.disp - vector of effect sizes in display scale\r\n # - lb.disp - conf. int. 
lower bound in display scale\r\n # - ub.disp - conf. int. upper bound in display scale\r\n #\r\n # - effects - list with 3 fields:\r\n # - ES - vector of effect sizes in calc. scale\r\n # - LL - conf. int. lower bound in calc. scale\r\n # - UL - conf. int. upper bound in calc. scale\r\n #\r\n # - types - vector specifying row types:\r\n # - 0 - study-level data\r\n # - 1 - subgroup summary data\r\n # - 2 - overall summary data\r\n # - 3 - row of column labels\r\n # - 4 - blank row (e.g. for empty summary row in right-hand side of cumulative plot)\r\n # - 5 - overall summary data with unscaled diamond (e.g. for leave-one-out plots)\r\n # \r\n # - label - vector of row labels of length 1 more than length of effect sizes.\r\n # First entry is usually \"Studies\" assuming first row has type 3.\r\n #\r\n # - scale - transformation scale - takes one of the following values:\r\n # - \"standard\" - untransformed\r\n # - \"log\"\r\n # - \"logit\"\r\n # - \"arcsine\" \r\n #\r\n # - options - plot options\r\n #\r\n # - plot range - range of x-values in which to draw plot\r\n # \r\n # \r\n forest.data <- format.data.cols(forest.data)\r\n # format the text of the data columns displayed on forest plot\r\n types <- forest.data$types\r\n num.labels <- length(forest.data$label)\r\n rows <- assign.rows(types, num.labels)\r\n # row numbers of forest plot including blank rows (after summary rows)\r\n forest.data$rows <- rows\r\n \r\n forest.data <- create.grobs(forest.data)\r\n # create graphical objects for study and data columns.\r\n \r\n plot.size <- calc.forest.plot.size(forest.data)\r\n # calculate height and width of output file\r\n forest.data$data.col.width <- plot.size$data.col.width\r\n how.wide <- plot.size$how.wide\r\n # width of output file\r\n how.tall <- plot.size$how.tall\r\n # height of output file\r\n viewport.layout <- calc.viewport.layout(forest.data, just=\"left\")\r\n # calculate the layout of the viewport\r\n \r\n # so here we're just going to use the relatively hacky \r\n # strategy of (R-)grepping for the literal \".png\"\r\n # note that this means that, technically, if someone tries \r\n # to save an iamge to my.pngimg.pdf, it will save it instead\r\n # as a png. 
on the other hand, why would someone do that?\r\n if (length(grep(\".png\", outpath)) != 0){\r\n png(file=outpath, width = how.wide, height = how.tall+2 , units = \"in\", res = 144) \r\n }\r\n else{\r\n pdf(file=outpath, width = how.wide+1, height = how.tall+2) \r\n }\r\n \r\n pushViewport(viewport(layout=viewport.layout))\r\n changed.params <- draw.forest.plot(forest.data)\r\n \r\n graphics.off()\r\n\r\n\r\n changed.params\r\n}\r\n\r\n#############################################################\r\n# functions for creating graphical objects and viewports #\r\n#############################################################\r\n\r\ncreate.grobs <- function(forest.data) {\r\n # create graphical objects for study and data cols.\r\n # and add them to forest.data\r\n show.study.col <- forest.data$options$show.study.col \r\n \r\n additional.cols.grob <- c()\r\n # create graphical object for data columns.\r\n if (length(forest.data$additional.col.data)>0 ){\r\n additional.cols.grob <- additional.columns(forest.data, \"bold\")\r\n forest.data$additional.cols.grob <- additional.cols.grob\r\n }\r\n if (show.study.col==TRUE) {\r\n study.col.grob <- study.column(forest.data, \"bold\")\r\n # create graphical object for study column\r\n forest.data$study.col.grob <- study.col.grob\r\n } \r\n forest.data\r\n}\r\n\r\nadditional.columns <- function(forest.data, font = \"bold\") {\r\n # Gets data for effect sizes column (col 2) and raw data (cols 3 and 4),\r\n # if user has chosen to display them.\r\n additional.columns <- vector(\"list\", length(forest.data$additional.col.data))\r\n \r\n for (j in 1:length(forest.data$additional.col.data)){\r\n content<-rep(NA, length(forest.data$label))\r\n\r\n for (i in 1:length(forest.data$label)){\r\n if ((forest.data$types[i] == 1) || (forest.data$types[i] == 2))\r\n content[i] <- list(textGrob(forest.data$additional.col.data[[j]][[i]], \r\n x=1, just = \"right\", gp = gpar(fontface = \"bold\", fontfamily=\"mono\", fontsize=\"10\")))\r\n else\r\n content[i] <- list(textGrob(forest.data$additional.col.data[[j]][[i]], \r\n x=1, just = \"right\", gp = gpar(fontface = \"plain\", fontfamily=\"mono\", fontsize=\"10\")))\r\n }\r\n rows <- forest.data$rows\r\n additional.columns[[j]] <-list(content = content, rows = rows)\r\n }\r\n additional.columns\r\n}\r\n\r\nstudy.column <- function(forest.data, title.font=\"bold\") {\r\n # Gets data for the study name column\r\n # called by draw.forest.plot\r\n content<-rep(NA, length(forest.data$label))\r\n for (i in 1:length(forest.data$label)){\r\n if (forest.data$types[i] != 0)\r\n content[i] <- list(textGrob(forest.data$label[i], x=0, just = \"left\", gp = gpar(fontface = title.font, fontsize=\"10\")))\r\n else\r\n content[i] <- list(textGrob(forest.data$label[i], x=0, just = \"left\", gp = gpar(fontface = \"plain\", fontsize=\"10\")))\r\n }\r\n \r\n study.column.list <- list(content = content)\r\n study.column.list\r\n}\r\n\r\ncalc.viewport.layout <- function(forest.data, just){\r\n # Calculates layout for forest plot viewport\r\n if (length(forest.data$additional.col.data)>0 ){\r\n num.additional.cols <- length(forest.data$additional.cols.grob) \r\n } else {\r\n num.additional.cols <- 0\r\n }\r\n forest.plot.params <- create.plot.options(forest.data, gapSize = 3.2, plotWidth=5)\r\n # @TODO: move these to forest plot options\r\n rows <- forest.data$rows\r\n num.rows <- rows[length(rows)]\r\n # number of rows including blank rows\r\n width.list <- calc.width.list(forest.data)\r\n num.cols <- length(width.list) + 1\r\n # 1 more for the 
plot itself\r\n\r\n if (length(width.list) > 0) {\r\n vp.width <- unit.c(width.list, forest.plot.params$effect.col.width)\r\n } else {\r\n vp.width <- unit.c(forest.plot.params$effect.col.width)\r\n }\r\n vp.layout <- grid.layout(num.rows+1, num.cols,\r\n widths=vp.width,\r\n heights = unit(rep(1, num.rows) , \"lines\"),\r\n just=just)\r\n}\r\n\r\ncalc.forest.plot.size <- function(forest.data){\r\n # Calculates width and height of the plot.\r\n show.study.col <- forest.data$options$show.study.col\r\n if (length(forest.data$additional.col.data)>0 ){\r\n num.additional.cols <- length(forest.data$additional.cols.grob) \r\n } else {\r\n num.additional.cols <- 0\r\n }\r\n forest.plot.params <- create.plot.options(forest.data, gapSize = 3.2, plotWidth=5)\r\n # @TODO: move these to forest.plot.options\r\n rows <- forest.data$rows\r\n num.rows <- rows[length(rows)]\r\n \r\n row.height <- convertY(unit(1, \"lines\") , \"inches\" , valueOnly=TRUE)\r\n\r\n # height of each row in inches \r\n how.tall <- num.rows * row.height\r\n width.list <- calc.width.list(forest.data)\r\n if (show.study.col==TRUE) {\r\n if (num.additional.cols > 0) {\r\n data.col.width <- sum( convertX( unit.c(width.list[3:length(width.list)]) , \"inches\" , valueOnly=TRUE ) ) + \r\n (num.additional.cols - 1) * convertX(forest.plot.params$col.gap, \"inches\" , valueOnly=TRUE )\r\n } else {\r\n data.col.width <- 0\r\n }\r\n \r\n } else {\r\n if (num.additional.cols > 0) {\r\n data.col.width <- sum( convertX( unit.c(width.list) , \"inches\" , valueOnly=TRUE ) ) + \r\n (num.additional.cols - 1) * convertX(forest.plot.params$col.gap, \"inches\" , valueOnly=TRUE )\r\n } else {\r\n data.col.width <- 0 \r\n }\r\n } \r\n \r\n if (length(width.list) > 0) {\r\n how.wide <- sum(convertX(unit.c(width.list) , \"inches\" , valueOnly=TRUE ) ) + \r\n # width of data columns\r\n convertX(forest.plot.params$effect.col.width, \"inches\" , valueOnly=TRUE ) +\r\n # width of actual forest plot\r\n 2 * convertX(forest.plot.params$col.gap, \"inches\" , valueOnly=TRUE )\r\n # two extra column gap widths for spacing.\r\n } else {\r\n how.wide <- convertX(forest.plot.params$effect.col.width, \"inches\" , valueOnly=TRUE ) +\r\n 2 * convertX(forest.plot.params$col.gap, \"inches\" , valueOnly=TRUE )\r\n }\r\n plot.size <- list(\"how.wide\"=how.wide, \"how.tall\"=how.tall, \"data.col.width\"=data.col.width)\r\n}\r\n \r\ncalc.width.list <- function(forest.data) {\r\n # calculate widths of study column and data columns.\r\n show.study.col <- forest.data$options$show.study.col\r\n forest.plot.params <- create.plot.options(forest.data, gapSize = 3.2, plotWidth=5)\r\n # @TODO: move these to forest plot options\r\n width.list <-vector(\"list\")\r\n if (show.study.col==TRUE) {\r\n study.col.grob <- forest.data$study.col.grob\r\n width.list[[1]] <- unit.c(max(unit(rep(1, length(forest.data$label)), \r\n \"grobwidth\", study.col.grob$content)), forest.plot.params$col.gap)\r\n if (length(forest.data$additional.col.data)>0 ) {\r\n additional.cols.grob <- forest.data$additional.cols.grob \r\n for (i in 1:length(additional.cols.grob)) {\r\n width.list[[i+1]] <- unit.c(width.list[[i]], max(unit(rep(1, length(forest.data$label)), \r\n \"grobwidth\", additional.cols.grob[[i]]$content)), forest.plot.params$col.gap) \r\n }\r\n }\r\n } else {\r\n if (length(forest.data$additional.col.data)>0 ) {\r\n additional.cols.grob <- forest.data$additional.cols.grob\r\n width.list[[1]] <- unit.c(max(unit(rep(1, length(forest.data$label)), \r\n \"grobwidth\", 
additional.cols.grob[[1]]$content)), forest.plot.params$col.gap)\r\n if (length(forest.data$additional.col.data)>1) {\r\n for (i in 2:length(additional.cols.grob)) {\r\n width.list[[i]] <- unit.c(width.list[[i-1]], max(unit(rep(1, length(forest.data$label)), \r\n \"grobwidth\", additional.cols.grob[[i]]$content)), forest.plot.params$col.gap)\r\n }\r\n }\r\n } \r\n }\r\n if (length(width.list) > 0) {\r\n width.list <- width.list[[length(width.list)]]\r\n }\r\n width.list\r\n}\r\n\r\nassign.rows <- function(types, num.labels) {\r\n # assign row numbers for plot data, skipping blank rows after rows of type 1,2, or 3\r\n rows<-c(1, rep(NA, (num.labels-1) ) )\r\n for (i in 1:(num.labels-1)){\r\n if (types[i] == 3 && (types[i+1] == 0 || types[i+1] == 5))\r\n # For leave-one-out plots - 5 is the overall summary\r\n rows[i+1] <- rows[i] + 2\r\n else if (types[i] == 5 && types[i+1] == 0)\r\n # For leave-one-out plots - 5 is the overall summary\r\n rows[i+1] <- rows[i] + 2\r\n else if (types[i] == 0 && (types[i+1] == 2 || types[i+1] == 4))\r\n rows[i+1] <- rows[i] + 2\r\n else if (types[i] == 0 && types[i+1] == 1 )\r\n rows[i+1] <- rows[i] + 1\r\n else if (types[i] == 1 && types[i+1] == 0 )\r\n rows[i+1] <- rows[i] + 2\r\n else if (types[i] == 1 && (types[i+1] == 2 || types[i+1] == 4))\r\n rows[i+1] <- rows[i] + 2\r\n else if (types[i] == 5) \r\n rows[i+1] <- rows[i] + 2\r\n else\r\n rows[i+1] <- rows[i] + 1\r\n }\r\n rows\r\n}\r\n\r\n#############################################\r\n# functions for drawing the forest plot #\r\n#############################################\r\n\r\ndraw.forest.plot <- function(forest.data){\r\n # Draws forest plot\r\n show.study.col <- forest.data$options$show.study.col \r\n \r\n # create graphical object for data columns.\r\n if (length(forest.data$additional.col.data)>0 ){\r\n additional.cols.grob <- forest.data$additional.cols.grob\r\n num.additional.cols <- length(additional.cols.grob)\r\n } else {\r\n num.additional.cols <- 0\r\n }\r\n rows <- forest.data$rows\r\n # Draw the text in study col and additional cols\r\n if (show.study.col==TRUE) {\r\n study.col.grob <- forest.data$study.col.grob\r\n #graphical object for study column\r\n draw.label.col(study.col.grob, 1, rows)\r\n # first two cols. are study col. and gap 1\r\n if (num.additional.cols > 0 ) {\r\n for (i in 1:num.additional.cols){\r\n draw.label.col(additional.cols.grob[[i]], 2*i+1, rows)\r\n # Note: col indices start at 3\r\n }\r\n }\r\n } else {\r\n # study col. and gap 1 not displayed\r\n if (num.additional.cols>0 ) {\r\n for (i in 1:num.additional.cols){\r\n draw.label.col(additional.cols.grob[[i]], 2*i-1, rows)\r\n # col. indices start at 1\r\n }\r\n }\r\n } \r\n\r\n if (forest.data$options$show.study.col==TRUE) {\r\n layout.pos.col <- 2*num.additional.cols + 3\r\n } else {\r\n layout.pos.col <- 2*num.additional.cols + 1\r\n # not displaying study col. 
or first gap.\r\n }\r\n changed.params <- draw.data.col(forest.data, j=layout.pos.col,\r\n color.overall = \"lightblue\",\r\n color.subgroup = \"yellow\",\r\n summary.line.col= \"red\",\r\n summary.line.pat = \"dashed\",\r\n diam.size = .8\r\n )\r\n changed.params\r\n}\r\n\r\n# Function to draw a cell in a text column\r\ndraw.label.col <- function(col, j, rows) {\r\n # Insert data columns from forest.data$additional.col.data into the plot\r\n # called by draw.forest.plot\r\n\r\n for (i in 1:length(rows)) {\r\n pushViewport(viewport(layout.pos.row=rows[i], layout.pos.col=j))\r\n # Labels are grobs containing their location so just\r\n # have to grid.draw() them\r\n grid.draw(col$content[[i]])\r\n popViewport()\r\n }\r\n}\r\n\r\ndraw.data.col <- function(forest.data, j, color.overall = \"black\",\r\n color.subgroup = \"black\", \r\n summary.line.col = \"darkred\",\r\n summary.line.pat = \"dashed\",\r\n diam.size) {\r\n\t\t\t\t\t \r\n # Draws the actual forest plot graph (excluding data columns)\r\n effects <- forest.data$effects\r\n plot.options <- forest.data$options\r\n plot.range <- forest.data$plot.range\r\n if (!is.null(forest.data$summary.est)) {\r\n # This is the summary estimate for loo plots. \r\n summary.est <- forest.data$summary.est \r\n } else {\r\n summary.est <- effects$ES[length(effects$ES)]\r\n }\r\n x.axis.label <- plot.options$xlabel\r\n fp.title = plot.options$fp.title\r\n user.ticks = plot.options$xticks\r\n label <- c()\r\n show.y.axis = plot.options$show.y.axis\r\n changed.params <- list()\r\n pushViewport(viewport(layout.pos.col=j, xscale=plot.range))\r\n\r\n if (show.y.axis == TRUE) {\r\n if (forest.data$scale == \"log\" && min(plot.range)<0 && max(plot.range)>0 ) {\r\n grid.lines(x=unit(0, \"native\"), y=0:1)\r\n }\r\n if (forest.data$scale == \"standard\" && min(plot.range)<0 && max(plot.range)>0 ) { \r\n grid.lines(x=unit(0, \"native\"), y=0:1)\r\n }\r\n if (forest.data$scale == \"logit\" && min(plot.range)<0 && max(plot.range)>0 ) { \r\n grid.lines(x=unit(0, \"native\"), y=0:1)\r\n }\r\n }\r\n \r\n if (forest.data$options$show.summary.line == TRUE) {\r\n # draw vertical line for summary\r\n grid.lines(x=unit(summary.est, \"native\"),\r\n y=0:1, gp=gpar(lty = summary.line.pat, col= summary.line.col))\r\n } \r\n \r\n if (forest.data$scale == \"standard\") {\r\n if (is.na(user.ticks)) {\r\n grid.xaxis(gp=gpar(cex=0.6))\r\n xaxp <- par(\"xaxp\")\r\n # Get the x ticks\r\n ticks <- seq(from=xaxp[1], to=xaxp[2], by=(xaxp[2] - xaxp[1]) / xaxp[3])\r\n } else {\r\n ticks <- user.ticks\r\n axis.range <- c(min(plot.range[1], ticks), max(plot.range[2], ticks))\r\n grid.xaxis(at = user.ticks , label = user.ticks, gp=gpar(cex=0.6))\r\n grid.xaxis(at = plot.range, label = FALSE)\r\n # Second call to grid.xaxis extends the axis to the plot range if necessary.\r\n }\r\n }\r\n \r\n if (forest.data$scale == \"log\") {\r\n log.ticks <- c()\r\n if (is.na(user.ticks[1])) { \r\n # Some cheap tricks to make the axis ticks look nice (in most cases)...\r\n # Note that \"at'' is in log scale but 'label'' is in standard scale\r\n to.make.ticks <- range(exp(plot.range))\r\n ticks <- axTicks(1, axp=c(to.make.ticks, 3), usr=c(-100, 100), log=TRUE)\r\n log.ticks <- log(ticks)\r\n log.ticks <- sort(c(log.ticks, plot.range, summary.est))\r\n lower.bound <- min(plot.range)\r\n upper.bound <- max(plot.range)\r\n\t\t log.ticks <- log.ticks[log.ticks >= lower.bound] # remember it is additive on this scale\r\n log.ticks <- log.ticks[log.ticks <= upper.bound]\r\n ticks <- exp(log.ticks)\r\n label <- 
round(ticks, 2)\r\n changed.params$fp_xticks <- ticks\r\n } else {\r\n\t\t ticks <- user.ticks[user.ticks > 0]\r\n # no negative tick marks in log scale\r\n if (length(ticks) > 0) {\r\n ticks <- unique(ticks)\r\n log.ticks <- log(sort(ticks))\r\n label = round(ticks, 2)\r\n axis.range <- c(min(plot.range[1], log.ticks), max(plot.range[2], log.ticks))\r\n } else {\r\n # no valid tick marks so just plot axis.\r\n log.ticks <- plot.range\r\n label <- rep(\"\", 2)\r\n }\r\n }\r\n grid.xaxis(at = log.ticks, label = label, gp=gpar(cex=0.6))\r\n grid.xaxis(at = plot.range, label = FALSE)\r\n # Second call to grid.xaxis extends the axis to the plot range if necessary.\r\n }\r\n \r\n if (forest.data$scale == \"logit\") {\r\n if (is.na(user.ticks)) { \r\n lb <- min(plot.range)\r\n ub <- max(plot.range)\r\n to.make.ticks <- c(lb, ub)\r\n ticks <- axTicks(1, axp=c(to.make.ticks, 4))\r\n changed.params$fp_xticks <- ticks\r\n } else {\r\n\t\t ticks <- user.ticks\r\n }\r\n grid.xaxis(at = ticks , label = round(ticks, 2), gp=gpar(cex=0.6))\r\n } \r\n \r\n if (forest.data$scale == \"arcsine\") {\r\n if (is.na(user.ticks)) { \r\n lb <- min(plot.range)\r\n ub <- max(plot.range)\r\n to.make.ticks <- c(lb, ub)\r\n ticks <- axTicks(1, axp=c(to.make.ticks, 4))\r\n changed.params$fp_xticks <- ticks\r\n } else {\r\n ticks <- user.ticks\r\n }\r\n grid.xaxis(at = ticks , label = round(ticks, 2), gp=gpar(cex=0.6))\r\n }\r\n \r\n grid.text(x.axis.label, y=unit(-2, \"lines\"), gp=gpar(cex=0.8))\r\n data.col.width <- forest.data$data.col.width\r\n # Width of data cols., not including study column or forest plot.\r\n rows <- forest.data$rows[-1]\r\n types <- forest.data$types[-1]\r\n num.rows <- rows[length(rows)]\r\n grid.text(fp.title, x=unit(-data.col.width, \"inches\"), y=unit(num.rows + 2, \"lines\"), gp=gpar(cex=1.0), just=\"left\")\r\n popViewport()\r\n box.sizes <- calc.box.sizes(forest.data, box.sca=0.8)\r\n # Sizes of boxes (or diamonds) in plot\r\n for (i in 1:length(rows)) {\r\n pushViewport(viewport(layout.pos.row=rows[i], layout.pos.col=j,\r\n xscale=plot.range)) \r\n if (types[i] == 0){\r\n draw.normal.CI(effects$LL[i], effects$ES[i], effects$UL[i], box.sizes[i])\r\n }\r\n else if (types[i] == 1){\r\n draw.summary.CI(effects$LL[i], effects$ES[i], effects$UL[i], box.sizes[i], color.subgroup, diam.size )\r\n }\r\n else if (types[i] == 2){\r\n draw.summary.CI(effects$LL[i], effects$ES[i], effects$UL[i], box.sizes[i], color.overall, diam.size )\r\n }\r\n else if (types[i] == 5){\r\n draw.summary.CI.no.scaled.diamond(effects$LL[i], effects$ES[i], effects$UL[i], box.sizes[i], color.overall, diam.size, plot.range)\r\n }\r\n popViewport()\r\n }\r\n \r\n changed.params\r\n}\r\n\r\ncalc.tick.marks <- function(plot.range, scale) {\r\n if (scale == \"log\") {\r\n if (is.na(user.ticks)) { \r\n # some cheap tricks to make the axis ticks look nice (in most cases)...\r\n # Note that at is in log scale but label is in standard scale\r\n to.make.ticks <- range(exp(plot.range))\r\n ticks <- axTicks(1, axp=c(to.make.ticks, 3), usr=c(-100, 100), log=TRUE)\r\n calc.ticks <- log(ticks)\r\n \r\n lower.bound <- min(plot.range)\r\n upper.bound <- max(plot.range)\r\n # find the largest tick mark less than the lower bound of plot.range, if there is one.\r\n if (calc.ticks[1] <= lower.bound) {\r\n min.tick <- max(calc.ticks[calc.ticks <= lower.bound])\r\n }\r\n # find the smallest tick mark greater than the upper bound of plot.range, if there is one.\r\n if (calc.ticks[length(calc.ticks)] >= upper.bound) {\r\n max.tick <- 
min(calc.ticks[calc.ticks >= upper.bound])\r\n }\r\n \t calc.ticks <- calc.ticks[calc.ticks >= min.tick] # remember it is additive on this scale\r\n calc.ticks <- calc.ticks[calc.ticks <= max.tick]\r\n ticks <- exp(calc.ticks)\r\n changed.params$fp_xticks <- ticks\r\n } else {\r\n\t\t ticks <- user.ticks\r\n calc.ticks <- log(user.ticks)\r\n }\r\n grid.xaxis(at = calc.ticks , label = round(ticks, 3), gp=gpar(cex=0.6)) \r\n } \r\n if (scale == \"logit\") {\r\n if (is.na(user.ticks)) { \r\n lb <- min(plot.range)\r\n ub <- max(plot.range)\r\n to.make.ticks <- c(lb, ub)\r\n ticks <- axTicks(1, axp=c(to.make.ticks, 4))\r\n ticks <- c(ticks, summary.est)\r\n changed.params$fp_xticks <- ticks\r\n } else {\r\n\t\t ticks <- user.ticks\r\n }\r\n grid.xaxis(at = ticks , label = round(ticks, 3), gp=gpar(cex=0.6))\r\n } \r\n calc.ticks\r\n}\r\n\r\ncalc.box.sizes <- function(forest.data, box.sca = 1) {\r\n # Calculates sizes for c.i. boxes and diamonds in forest plot.\r\n \t\r\n # weights for the boxes\r\n # note that 1.96 is a convention [not necessary for the scaling]\r\n # the analysis functions determine the CI width (e.g. 95% or 99%)\r\n # this is just scaling the boxes according to the SE\r\n\t# CHANGED as part of issue # 214\r\n\tmult <- get.mult.from.conf.level()\r\n precision <- NULL\r\n user.lb <- NULL\r\n user.ub <- NULL\r\n effects <- forest.data$effects\r\n # i have kept the \"ifs\" below: when we decide to include more metrics\r\n # these will be expanded\r\n \r\n if (forest.data$scale == \"log\"){\r\n precision <- sqrt(1 / ((effects$UL - effects$LL)/(2*mult)))\r\n } else if (forest.data$scale == \"standard\") {\r\n precision <- sqrt(1 / ((effects$UL - effects$LL)/(2*mult)))\r\n } else if (forest.data$scale == \"logit\") {\r\n precision <- sqrt(1 / ((effects$UL - effects$LL)/(2*mult)))\r\n } else if (forest.data$scale == \"arcsine\") {\r\n precision <- sqrt(1 / ((effects$UL - effects$LL)/(2*mult)))\r\n }\r\n box.sizes <- box.sca * precision/max(precision)\r\n # sizes of the boxes in the forest plot - proportional to width of CI\r\n}\r\n \r\ndraw.normal.CI <- function(LL, ES, UL, size) {\r\n # draws a non-summary rect-plus-CI\r\n # \"native\" units to position relative to\r\n # the x-axis scale, and \"snpc\" units to size relative to\r\n # the height of the row\r\n # (\"snpc\" stands for \"square normalised parent coordinates\"\r\n # which means that the value is calculated as a proportion\r\n # of the width and height of the current viewport and the\r\n # physically smaller of these is used)\r\n # called by draw.forest.plot\r\n grid.rect(x=unit(ES, \"native\"),\r\n width=unit(size, \"snpc\"), height=unit(size, \"snpc\"),\r\n gp=gpar(fill=\"black\"))\r\n # Draw arrow if exceed col range\r\n # convertX() used to convert between coordinate systems\r\n \r\n# TO DO: there is one case where this is a problem, when the summary estimate is wider than the CI\r\n# this can happen when the summary is calculated in a subgroup where there is only one study\r\n# this should be handled by another \"if\" that forces the xscale to be determined \"primarily\" by the CI of the summaries\r\n# this has to be done in the function above\r\n\r\n if ((convertX(unit(UL, \"native\"), \"npc\", valueOnly=TRUE) > 1) && (convertX(unit(LL, \"native\"), \"npc\", valueOnly=TRUE) >= 0)) {\r\n # this line is too long on the right - draw a right arrow from LL to 1 (in approriate coords.) 
\r\n #grid.arrows(x=unit(c(LL, 1), c(\"native\", \"npc\")), length=unit(0.05, \"inches\"))\r\n\tgrid.lines(x=unit(c(LL, 1), c(\"native\", \"npc\")), arrow=arrow(length=unit(0.05, \"inches\")), y=0.5)\r\n\t\r\n }\r\n else if ((convertX(unit(UL, \"native\"), \"npc\", valueOnly=TRUE) <= 1) && (convertX(unit(LL, \"native\"), \"npc\", valueOnly=TRUE) < 0)) {\r\n # this line is too long on the left - draw a left arrow from UL to 0 (in approriate coords.)\r\n #grid.arrows(x=unit(c(UL, 0), c(\"native\", \"npc\")), length=unit(0.05, \"inches\"))\r\n\tgrid.lines(x=unit(c(UL, 0), c(\"native\", \"npc\")), arrow=arrow(length=unit(0.05, \"inches\")), y=0.5)\r\n\t\r\n }\r\n else if ((convertX(unit(UL, \"native\"), \"npc\", valueOnly=TRUE) > 1) && (convertX(unit(LL, \"native\"), \"npc\", valueOnly=TRUE) < 0)) {\r\n # this line is too long on both sides - draw a left arrow from ES to 0 and a right arrow from ES to 1 (in approriate coords.)\r\n #grid.arrows(x=unit(c(ES, 0), c(\"native\", \"npc\")), length=unit(0.05, \"inches\"))\r\n\tgrid.lines(x=unit(c(ES, 0), c(\"native\", \"npc\")), arrow=arrow(length=unit(0.05, \"inches\")), y=0.5)\r\n #grid.arrows(x=unit(c(ES, 1), c(\"native\", \"npc\")), length=unit(0.05, \"inches\"))\r\n\tgrid.lines(x=unit(c(ES, 1), c(\"native\", \"npc\")), arrow=arrow(length=unit(0.05, \"inches\")), y=0.5)\r\n\t\r\n }\r\n else {\r\n # this line is too short - draw white if totally inside rect\r\n line.col <- if ((convertX(unit(ES, \"native\") + unit(0.5*size, \"lines\"),\r\n \"native\", valueOnly=TRUE) > UL) &&\r\n (convertX(unit(ES, \"native\") - unit(0.5*size, \"lines\"),\r\n \"native\", valueOnly=TRUE) < LL))\r\n \"white\"\r\n else\r\n # this line is just right\r\n \"black\"\r\n grid.lines(x=unit(c(LL, UL), \"native\"), y=0.5,\r\n gp=gpar(col=line.col))\r\n }\r\n}\r\n\r\n# Function to draw a summary \"diamond\" as wide as confidence interval\r\ndraw.summary.CI <- function(LL, ES, UL, size, color, diam.height) {\r\n # for diamonds: using half the height of the equivalent rect\r\n grid.polygon(x=unit(c(LL, ES, UL, ES), \"native\"),\r\n y=unit(0.5 + c(0, 0.35*diam.height, 0, -0.35*diam.height), \"npc\"), gp=gpar(fill=color))\r\n}\r\n\r\ndraw.summary.CI.no.scaled.diamond <- function(LL, ES, UL, size, color, diam.height, plot.range) {\r\n # draws a summary-CI without scaling on the width of the diamond\r\n # \"native\" units to position relative to\r\n # the x-axis scale, and \"snpc\" units to size relative to\r\n # the height of the row\r\n # (\"snpc\" stands for \"square normalised parent coordinates\"\r\n # which means that the value is calculated as a proportion\r\n # of the width and height of the current viewport and the\r\n # physically smaller of these is used)\r\n # called by draw.forest.plot\r\n #if (scale == \"log\") {\r\n # diam.width <- convertX(unit(diam.height, \"snpc\"), \"native\", valueOnly=TRUE)\r\n #} else {\r\n # diam.width <- 0.5*convertX(unit(diam.height, \"snpc\"), \"native\", valueOnly=TRUE)\r\n #}\r\n plot.width <- plot.range[2] - plot.range[1]\r\n grid.polygon(x=unit(c(ES-plot.width/30, ES, ES+plot.width/30, ES), \"native\"),\r\n y=unit(0.5 + c(0, 0.5*diam.height, 0, -0.5*diam.height), \"npc\"), gp=gpar(fill=color))\r\n if ((convertX(unit(UL, \"native\"), \"npc\", valueOnly=TRUE) > 1) && (convertX(unit(LL, \"native\"), \"npc\", valueOnly=TRUE) >= 0)) {\r\n # this line is too long on the right - draw a right arrow from LL to 1 (in approriate coords.) 
\r\n #grid.arrows(x=unit(c(LL, 1), c(\"native\", \"npc\")), length=unit(0.05, \"inches\"))\r\n\tgrid.lines(x=unit(c(LL, 1), c(\"native\", \"npc\")), arrow=arrow(length=unit(0.05, \"inches\")), y=0.5)\r\n }\r\n else if ((convertX(unit(UL, \"native\"), \"npc\", valueOnly=TRUE) <= 1) && (convertX(unit(LL, \"native\"), \"npc\", valueOnly=TRUE) < 0)) {\r\n # this line is too long on the left - draw a left arrow from UL to 0 (in approriate coords.)\r\n #grid.arrows(x=unit(c(UL, 0), c(\"native\", \"npc\")), length=unit(0.05, \"inches\"))\r\n\tgrid.lines(x=unit(c(UL, 0), c(\"native\", \"npc\")), arrow=arrow(length=unit(0.05, \"inches\")), y=0.5)\r\n }\r\n else if ((convertX(unit(UL, \"native\"), \"npc\", valueOnly=TRUE) > 1) && (convertX(unit(LL, \"native\"), \"npc\", valueOnly=TRUE) < 0)){\r\n # this line is too long on both sides - draw a left arrow from ES to 0 and a right arrow from ES to 1 (in approriate coords.)\r\n #grid.arrows(x=unit(c(ES, 0), c(\"native\", \"npc\")), length=unit(0.05, \"inches\"))\r\n\tgrid.lines(x=unit(c(ES, 0), c(\"native\", \"npc\")), arrow=arrow(length=unit(0.05, \"inches\")), y=0.5)\r\n\t\r\n #grid.arrows(x=unit(c(ES, 1), c(\"native\", \"npc\")), length=unit(0.05, \"inches\"))\r\n\tgrid.lines(x=unit(c(ES, 1), c(\"native\", \"npc\")), arrow=arrow(length=unit(0.05, \"inches\")), y=0.5)\r\n }\r\n else {\r\n # this line is too short - draw white if totally inside rect\r\n line.col <- if ((convertX(unit(ES, \"native\") + unit(0.5*size, \"lines\"), \"native\", valueOnly=TRUE) > UL) &&\r\n (convertX(unit(ES, \"native\") - unit(0.5*size, \"lines\"), \"native\", valueOnly=TRUE) < LL))\r\n \"white\"\r\n else\r\n # this line is just right\r\n \"black\"\r\n grid.lines(x=unit(c(LL, UL), \"native\"), y=0.5,\r\n gp=gpar(col=line.col))\r\n }\r\n} \r\n\r\ncreate.plot.options <- function(forest.data, gapSize, plotWidth) {\r\n # This function is unrelated to the user options that are passed in\r\n # via forest.data$options. 
It just specifies gapSize (space between columns) and plotWidth (width of effect size col.).\r\n # This function is only called by calc.viewport.layout and calc.forest.plot.size.\r\n effect.col.width <- unit(plotWidth, \"inches\")\r\n # width of the forest plot\r\n forest.params = list(\r\n col.gap = unit(gapSize, \"mm\"),\r\n effect.col.width = effect.col.width\r\n )\r\n forest.params\r\n}\r\n\r\n#######################################\r\n# two forest plots #\r\n#######################################\r\n \r\ntwo.forest.plots <- function(forest.data, outpath) {\r\n png(filename=paste(\"r_tmp\",\"INTER\",sep=\"/\")) # to fix windows popping out at you issue\r\n\t\r\n # draw two forest plots side by side.\r\n forest.data1 <- forest.data$left\r\n forest.data2 <- forest.data$right\r\n forest.data1 <- format.data.cols(forest.data1)\r\n types1 <- forest.data1$types\r\n num.labels1 <- length(forest.data1$label)\r\n rows1 <- assign.rows(types1, num.labels1)\r\n # row numbers of forest plot including blank rows (after summary rows)\r\n forest.data1$rows <- rows1\r\n forest.data1 <- create.grobs(forest.data1)\r\n forest.data2 <- format.data.cols(forest.data2)\r\n types2 <- forest.data2$types\r\n num.labels2 <- length(forest.data2$label)\r\n rows2 <- assign.rows(types2, num.labels2)\r\n # row numbers of forest plot including blank rows (after summary rows)\r\n forest.data2$rows <- rows2\r\n forest.data2 <- create.grobs(forest.data2)\r\n # create graphical objects for study and data columns.\r\n plot.size1 <- calc.forest.plot.size(forest.data1)\r\n forest.data1$data.col.width <- plot.size1$data.col.width\r\n plot.size2 <- calc.forest.plot.size(forest.data2)\r\n forest.data2$data.col.width <- plot.size2$data.col.width\r\n # calculate heights and widths of plots\r\n viewport.layout1 <- calc.viewport.layout(forest.data1, just=\"left\") \r\n platform <- Sys.info()\r\n viewport.layout2 <- calc.viewport.layout(forest.data2, just=\"left\")\r\n \r\n # calculate layouts of plots\r\n how.wide1 <- plot.size1$how.wide\r\n how.wide2 <- plot.size2$how.wide\r\n width <- how.wide1 + how.wide2\r\n how.tall1 <- plot.size1$how.tall\r\n how.tall2 <- plot.size2$how.tall\r\n how.tall <- max(how.tall1, how.tall2)\r\n\r\n if (platform[[1]]==\"Windows\") {\r\n x.pos <- 1 + (how.wide1 - how.wide2) / (4 * how.wide1)\r\n } else {\r\n x.pos <- 1 + (how.wide1 - how.wide2) / how.wide1\r\n }\r\n if (length(grep(\".png\", outpath)) != 0){\r\n png(file=outpath, width = how.wide1 + how.wide2, height = how.tall+1 , units = \"in\", res = 144) \r\n } else{\r\n pdf(file=outpath, width = how.wide1 + how.wide2 + 1, height = how.tall+2) \r\n }\r\n pushViewport(viewport(layout=grid.layout(1,2, widths=unit(c(how.wide1, how.wide2), c(\"in\", \"in\")))))\r\n pushViewport(viewport(layout=viewport.layout1, layout.pos.col=1))\r\n changed.params <- draw.forest.plot(forest.data1) \r\n # Only saving params changes for the left forest plot, because currently plot edit\r\n # can't handle two sets of params values for xticks or plot bounds.\r\n # Could be changed in future.\r\n popViewport()\r\n pushViewport(viewport(layout=viewport.layout2, layout.pos.col=2))\r\n draw.forest.plot(forest.data2)\r\n popViewport(2)\r\n graphics.off()\r\n changed.params\r\n}\r\n\r\n#######################################\r\n# meta-regression scatter #\r\n#######################################\r\nmeta.regression.plot <- function(plot.data, outpath) {\r\n\tpng(filename=paste(\"r_tmp\",\"INTER\",sep=\"/\")) # to fix windows popping out at you issue\r\n\r\n lweight = 1\r\n 
lpattern = \"solid\"\r\n lcol = \"blue\"\r\n ES <- plot.data$effects$ES\r\n se <- plot.data$effects$se\r\n # make the data data.frame\r\n data.reg <- data.frame(plot.data$effects, types=plot.data$types)\r\n # data for plot (only keep the studies - not the summaries)\r\n data.reg <- subset(data.reg, types==0)\r\n cov.name <- plot.data$covariate$varname\r\n cov.values <- plot.data$covariate$values\r\n x.range.min <- min(cov.values)\r\n x.range.max <- max(cov.values)\r\n x.range <- x.range.max - x.range.min\r\n x.min <- x.range.min - (x.range / 5)\r\n x.max <- x.range.max + (x.range / 5)\r\n y.range.min <- min(ES)\r\n y.range.max <- max(ES)\r\n y.range <- y.range.max - y.range.min\r\n y.min <- y.range.min - (y.range / 5)\r\n y.max <- y.range.max + (y.range / 5)\r\n\r\n if (length(grep(\".png\", outpath)) != 0){\r\n png(file=outpath, width=10 , height=5, units=\"in\", res=144)\r\n } else {\r\n pdf(file=outpath, width=10 , height=5)\r\n }\r\n\r\n plot(y = data.reg$ES, x=cov.values,\r\n xlim=c(x.min, x.max),\r\n ylim=c(y.min, y.max),\r\n xlab=plot.data$xlabel, \r\n ylab=plot.data$ylabel, type='n')\r\n symbols(y = data.reg$ES, x=cov.values, \r\n circles = 1 / data.reg$se,\r\n inches=.3, \r\n bty=plot.data$plotregion, add=TRUE)\r\n if (plot.data$regline) {\r\n x<-c(x.range.min, x.range.max)\r\n y<-c (plot.data$fitted.line$intercept + \r\n x.range.min*plot.data$fitted.line$slope, plot.data$fitted.line$intercept + \r\n x.range.max*plot.data$fitted.line$slope)\r\n lines(x, y, col=lcol, lwd=lweight, lty=lpattern)\r\n }\r\n # write the plot data out to disk\r\n graphics.off()\r\n}\r\n\r\n######################################\r\n# Diagnostic SROC #\r\n######################################\r\nsroc.plot <- function(plot.data, outpath){\r\n\tpng(filename=paste(\"r_tmp\",\"INTER\",sep=\"/\")) # to fix windows popping out at you issue\r\n\t\r\n # draw an SROC plot.\r\n lcol <- \"blue\"\r\n sym.size <- .03\r\n lweight = 1\r\n lpatern = \"solid\"\r\n plotregion = \"n\"\r\n fitted.line <- plot.data$fitted.line\r\n weighted <- plot.data$weighted\r\n TPR <- plot.data$TPR\r\n FPR <- plot.data$FPR\r\n xlab=\"1 - Specificity\" \r\n ylab=\"Sensitivity\"\r\n s.range <- plot.data$s.range\r\n if (length(grep(\".png\", outpath)) != 0){\r\n png(file=outpath, height=5, width=5, units=\"in\", res=144)\r\n } else {\r\n pdf(file=outpath, height=5, width=5)\r\n }\r\n plot(y = NULL, x=NULL, xlim=c(0, 1),\r\n ylim=c(0, 1),\r\n xlab=xlab, \r\n ylab=ylab, \r\n asp=1,\r\n type='n')\r\n symbols(y = plot.data$TPR, x = plot.data$FPR,\r\n bty = plotregion, circles=rep(1, length(TPR)), col = \"black\", inches=sym.size, add=TRUE)\r\n \r\n # create regression line values\r\n s.vals <- seq(from = s.range$min, to = s.range$max, by=.001)\r\n reg.line.vals <- fitted.line$intercept + fitted.line$slope * s.vals\r\n std.err <- plot.data$std.err\r\n mult <- plot.data$mult\r\n upper.ci.vals <- reg.line.vals + mult * std.err\r\n lower.ci.vals <- reg.line.vals - mult * std.err\r\n # transform regression line coords to TPR by 1 - FPR coords\r\n reg.line.vals.trans <- invlogit((s.vals + reg.line.vals) / 2)\r\n s.vals.trans <- invlogit((s.vals - reg.line.vals) / 2)\r\n \r\n lines(s.vals.trans, reg.line.vals.trans, col = lcol, lwd = lweight, lty = lpatern)\r\n upper.ci.vals.trans <- invlogit((s.vals + upper.ci.vals))\r\n lower.ci.vals.trans <- invlogit((s.vals + lower.ci.vals))\r\n graphics.off()\r\n}\r\n\r\n################################################\r\n# Diagnostic PPV and NPV by Prevalence 
#\r\n################################################\r\n\r\ncompute.ppv <- function(sens, spec, prev) {\r\n npv <- sens * prev / (sens * prev + (1 - spec) * (1 - prev))\r\n}\r\n\r\ncompute.npv <- function(sens, spec, prev) {\r\n ppv <- spec * (1 - prev) / (spec * (1 - prev) + (1 - sens) * prev)\r\n}\r\n\r\nplot.ppv.npv.by.prev <- function(diagnostic.data, params) {\r\n params$measure <- \"Sens\"\r\n diagnostic.data.sens <- compute.diag.point.estimates(diagnostic.data, params)\r\n params$measure <- \"Spec\"\r\n diagnostic.data.spec <- compute.diag.point.estimates(diagnostic.data, params)\r\n params$measure <- \"NPV\"\r\n diagnostic.data.npv <- compute.diag.point.estimates(diagnostic.data, params)\r\n params$measure <- \"PPV\"\r\n diagnostic.data.ppv <- compute.diag.point.estimates(diagnostic.data, params)\r\n \r\n prev <- ((diagnostic.data@TP + diagnostic.data@FN) / \r\n (diagnostic.data@TP + diagnostic.data@FN + diagnostic.data@FP + diagnostic.data@TN))\r\n prev.min <- min(prev)\r\n prev.max <- max(prev)\r\n npv <- diagnostic.data.npv@y\r\n npv <- diagnostic.transform.f(\"NPV\")$display.scale(npv)\r\n ppv <- diagnostic.data.ppv@y\r\n ppv <- diagnostic.transform.f(\"PPV\")$display.scale(ppv)\r\n \r\n plot(0:1, 0:1, type=\"n\",main=\"PPV and NPV by Prevalence\", xlab=\"Prevalence\", ylab=\"\")\r\n points(prev, npv, col=3,)\r\n points(prev, ppv, col=4)\r\n legend(\"right\", c(\"Negative predictive value\", \"Positive predictive value\"), bty=\"n\", col=c(3,4), text.col=c(3,4), pch=c(1,1))\r\n\r\n res.sens <- rma.uni(yi=diagnostic.data.sens@y, sei=diagnostic.data.sens@SE, \r\n [email protected],\r\n method=\"FE\", level=params$conf.level,\r\n digits=params$digits)\r\n res.spec <- rma.uni(yi=diagnostic.data.spec@y, sei=diagnostic.data.spec@SE, \r\n [email protected],\r\n method=\"FE\", level=params$conf.level,\r\n digits=params$digits) \r\n sens.est <- diagnostic.transform.f(\"Sens\")$display.scale(res.sens$b[1])\r\n spec.est <- diagnostic.transform.f(\"Spec\")$display.scale(res.spec$b[1])\r\n prev.overall <- seq(from=prev.min, to=prev.max, by=.01)\r\n sens.overall <- rep(sens.est, length(prev.overall))\r\n spec.overall <- rep(spec.est, length(prev.overall))\r\n npv.overall <- compute.npv(sens.overall, spec.overall, prev.overall)\r\n ppv.overall <- compute.ppv(sens.overall, spec.overall, prev.overall)\r\n lines(prev.overall, npv.overall, col=3)\r\n lines(prev.overall, ppv.overall, col=4)\r\n}\r\n#######################################################\r\n# Functions for formatting data for display in plots #\r\n#######################################################\r\n\r\nformat.data.cols <- function(plot.data) {\r\n # formats data columns for display on forest plot\r\n options <- plot.data$options\r\n types <- plot.data$types\r\n if (options$show.col2==TRUE) {\r\n \r\n y.disp <- plot.data$effects.disp$y.disp\r\n lb.disp <- plot.data$effects.disp$lb.disp\r\n ub.disp <- plot.data$effects.disp$ub.disp\r\n effect.sizes <- format.effect.sizes(y=y.disp, lb=lb.disp, ub=ub.disp, options)\r\n # first row contains headers, so add label\r\n effect.size.label <- create.effect.size.label(effect.sizes, options)\r\n effect.size.col <- c(effect.size.label,\r\n paste(effect.sizes$y.display, effect.sizes$lb.display, \",\", \r\n effect.sizes$ub.display, \")\", sep = \"\"))\r\n # replace data for type 4 rows with empty strings. 
Type 4 rows are empty rows in the forest plot (for vertical alignment only).\r\n effect.size.col[types==4] <- \"\"\r\n plot.data$additional.col.data$es <- effect.size.col\r\n } \r\n if ((options$show.col3==TRUE) && (!is.null(plot.data$col3))) {\r\n label <- options$col3.str\r\n data.col <- format.raw.data.col(nums = plot.data$col3$nums, denoms = plot.data$col3$denoms, label = label, types=types) \r\n plot.data$additional.col.data$cases = data.col\r\n }\r\n if ((options$show.col4==TRUE) && (!is.null(plot.data$col4))) {\r\n label <- options$col4.str\r\n data.col <- format.raw.data.col(nums = plot.data$col4$nums, denoms = plot.data$col4$denoms, label = label, types=types) \r\n plot.data$additional.col.data$controls = data.col\r\n }\r\n plot.data\r\n}\r\n\r\nformat.effect.sizes <- function(y, lb, ub, options) {\r\n # format column by padding entries with spaces for alignment\r\n digits <- options$digits\r\n y.display <- sprintf(paste(\"%.\", digits,\"f\", sep=\"\"), y)\r\n lb.display <- sprintf(paste(\"%.\", digits,\"f\", sep=\"\"), lb)\r\n ub.display <- sprintf(paste(\"%.\", digits,\"f\", sep=\"\"), ub)\r\n \r\n # for ub, add an extra space to positive numbers for alignment (negative numbers display minus sign)\r\n if (length(ub.display[ub.display >= 0])) {\r\n ub.display[ub.display >= 0] <- mapply(pad.with.spaces, ub.display[ub.display >= 0], begin.num=1, end.num=0)\r\n }\r\n # format results by padding with spaces to align columns \r\n ub.max.chars <- max(nchar(ub.display))\r\n ub.extra.space <- ub.max.chars - nchar(ub.display)\r\n ub.display <- mapply(pad.with.spaces, ub.display, begin.num = ub.extra.space, end.num=0)\r\n # for ub, add an extra space to positive numbers for alignment (negative numbers display minus sign)\r\n if (length(ub.display[ub.display >= 0])) {\r\n ub.display[ub.display >= 0] <- mapply(pad.with.spaces, ub.display[ub.display >= 0], begin.num=1, end.num=0)\r\n }\r\n # if ub has any negative entries, add an extra space to separate entry from preceding \",\"\r\n if (min(ub) < 0) {\r\n ub.display <- paste(\" \", ub.display, sep=\"\")\r\n }\r\n lb.display <- paste(\" (\", lb.display, sep=\"\")\r\n lb.max.chars <- max(nchar(lb.display))\r\n lb.extra.space <- lb.max.chars - nchar(lb.display)\r\n lb.display <- mapply(pad.with.spaces, lb.display, begin.num = lb.extra.space, end.num=0)\r\n effect.sizes <- list(\"y.display\"=y.display, \"lb.display\"=lb.display, \"ub.display\"=ub.display)\r\n}\r\n\r\ncreate.effect.size.label <- function(effect.sizes, options) {\r\n # Add label to effect.size.column and align the comma if the label\r\n # is of the form ES(LL, UL), with the data entries below it. 
Since the default label\r\n # is no longer of that form, this function could be removed.\r\n col2.label <- as.character(options$col2.str)\r\n # if label contains \",\", pad label to align columns\r\n label.info <- check.label(label = col2.label, split.str = \",\")\r\n max.chars <- max(nchar(effect.sizes$ub.display)) + 1\r\n # add 1 because a space is added before each ub entry.\r\n if (label.info$contains.symbol == TRUE) {\r\n # Label contains \",\" so pad label to align \",\"\r\n # we're assuming that there is a single space after \",\"\r\n col2.label.padded <- pad.with.spaces(col2.label, begin.num=0, end.num = max.chars - label.info$end.string.length) \r\n } else {\r\n # label doesn't contain \",\" so pad label to center over column \r\n col2.width <- max(nchar(effect.sizes$y.disp) + nchar(effect.sizes$lb.disp) + nchar(effect.sizes$ub.disp))\r\n if (col2.width > nchar(col2.label)) {\r\n # width of data greater than the length of col. label \r\n col2.label.padded <- pad.with.spaces(col2.label, begin.num=0, end.num = floor((col2.width - nchar(col2.label)) / 2)) \r\n } else {\r\n # don't pad with spaces\r\n col2.label.padded <- col2.label\r\n }\r\n }\r\n col2.label.padded\r\n}\r\n \r\nformat.raw.data.col <- function(nums, denoms, label, types) {\r\n # format raw data columns to align forward slashes\r\n types.short <- types[types %in% c(0,1)]\r\n # remove types 3 (labels) and 2 (overall total) if present\r\n nums.total <- sum(nums[types.short==0])\r\n denoms.total <- sum(denoms[types.short==0])\r\n # only sum over types==0 (individual studies)\r\n max.chars <- nchar(denoms.total) + 1\r\n # add 1 because a space is added before each denom.\r\n overall.row <- paste(nums.total, \"/\", denoms.total, sep = \"\")\r\n label.info <- check.label(label, split.str = \"/\")\r\n if (label.info$contains.symbol == TRUE) {\r\n # pad label or denoms.total to align \"/\"\r\n # we're assuming that there is a single space after \"/\".\r\n end.string.length <- label.info$end.string.length\r\n label.padded <- pad.with.spaces(label, begin.num=0, end.num = max.chars - end.string.length - 1)\r\n overall.row <- pad.with.spaces(overall.row, begin.num=0, end.num = end.string.length - max.chars)\r\n max.chars <- max(max.chars, end.string.length) \r\n } else {\r\n # pad label to center above column\r\n label.padded <- pad.with.spaces(label, begin.num=0, end.num = floor((nchar(overall.row) - nchar(label)) / 2))\r\n }\r\n # pad data row to align forward slashes\r\n denoms <- mapply(pad.with.spaces, denoms, begin.num=0, end.num = max.chars - (nchar(denoms) + 1))\r\n # add 1 to nchar(denoms) because a space is added before each denom\r\n data.column = c(label.padded, paste(nums, \"/\", denoms, sep = \"\"), overall.row)\r\n data.column\r\n}\r\n\r\ncheck.label <- function(label, split.str) {\r\n # utility for format.effect.size.col and format.raw.data.col\r\n # check column labels for split.symbol and return length of string that follows split.str\r\n split.label <- strsplit(label, split.str)\r\n split.label.length <- length(split.label[[1]])\r\n label.info <- list(\"contains.symbol\"=FALSE, \"end.string.length\"=0)\r\n if (split.label.length > 1) {\r\n label.info$contains.symbol <- TRUE\r\n label.info$end.string.length <- nchar(split.label[[1]][split.label.length])\r\n }\r\n label.info\r\n} \r\n \r\ncalculate.radii <- function(plot.data, inv.var, max.symbol.size, max.ratio) {\r\n # calculates radii of symbols for a meta-regression plot\r\n # using a scaling function f(x) = C * x^e.\r\n # inv.var is a vector of inverse 
variances,\r\n # max.symbol.size is the maximum size for a symbol, and max.ratio is the maximum ratio of symbol sizes.\r\n ES <- plot.data$effects$ES\r\n inv.var <- (plot.data$effects$se)^2\r\n cov.values <- plot.data$covariate$values\r\n x.range.min <- min(cov.values)\r\n x.range.max <- max(cov.values)\r\n x.range <- x.range.max - x.range.min\r\n y.range.min <- min(ES)\r\n y.range.max <- max(ES)\r\n y.range <- y.range.max - y.range.min\r\n min.range <- min(x.range, y.range)\r\n inv.var.min <- min(inv.var)\r\n inv.var.max <- max(inv.var)\r\n inv.var.ratio <- inv.var.max / inv.var.min\r\n radius.max <- min.range / 10\r\n radii <- (radius.max / inv.var.max) * inv.var\r\n}" }, { "alpha_fraction": 0.32203391194343567, "alphanum_fraction": 0.35593220591545105, "avg_line_length": 14.090909004211426, "blob_id": "cb8bffa54da0b0ba48b63e499f94fb6e8880f26a", "content_id": "a047678b79bd3e9d4df8c34be3c875f0fa7814bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 177, "license_type": "no_license", "max_line_length": 32, "num_lines": 11, "path": "/src/R/HSROC/R/REFSTD_4.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "REFSTD_4 <-\r\nfunction (rs, n.sample, n_rs) \r\n{\r\n if (rs[[1]] == 1) {\r\n x = rep(1, n_rs)\r\n }\r\n else {\r\n x = rep(1:rs[[1]], n_rs)\r\n }\r\n return(x)\r\n}\r\n" }, { "alpha_fraction": 0.4259776473045349, "alphanum_fraction": 0.46648043394088745, "avg_line_length": 35.68421173095703, "blob_id": "ff0320f0307eeacc83d1fdb734f1d722e7a30dc5", "content_id": "23d3e59bbc0186e1f3b08413f068e00f5ac1bf93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 716, "license_type": "no_license", "max_line_length": 66, "num_lines": 19, "path": "/src/R/HSROC/R/REFSTD_2.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "REFSTD_2 <-\r\nfunction (n_rs, likelihood, prior) \r\n{\r\n if (n_rs[[1]] != 1) {\r\n x = n_rs[[1]]\r\n a.Se = likelihood[[1]] + likelihood[[4]] + prior[[1]]\r\n b.Se = likelihood[[2]] + likelihood[[3]] + prior[[2]]\r\n a.Sp = likelihood[[6]] + likelihood[[7]] + prior[[3]]\r\n b.Sp = likelihood[[5]] + likelihood[[8]] + prior[[4]]\r\n }\r\n else {\r\n x = 1\r\n a.Se = sum(likelihood[[1]] + likelihood[[4]]) + prior[[1]]\r\n b.Se = sum(likelihood[[2]] + likelihood[[3]]) + prior[[2]]\r\n a.Sp = sum(likelihood[[6]] + likelihood[[7]]) + prior[[3]]\r\n b.Sp = sum(likelihood[[5]] + likelihood[[8]]) + prior[[4]]\r\n }\r\n return(list(x, a.Se, b.Se, a.Sp, b.Sp))\r\n}\r\n" }, { "alpha_fraction": 0.5437821745872498, "alphanum_fraction": 0.5518763661384583, "avg_line_length": 33.64102554321289, "blob_id": "aa5215381f3782ecdaee4c830ea13ead283b62b0", "content_id": "01df8887a16210b199ded89e16b9d3ed871339b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1359, "license_type": "no_license", "max_line_length": 70, "num_lines": 39, "path": "/src/qconsole.py", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "from PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\n\nSTART_COLUMN = 3\n\nclass QConsole(QTextEdit):\n \n def __init__(self, parent):\n super(QConsole, self).__init__(parent)\n self.parent = parent\n \n def keyPressEvent(self, event):\n if event.key() == Qt.Key_Return:\n self.emit(SIGNAL(\"returnPressed()\"))\n elif event.key() == Qt.Key_Up:\n self.emit(SIGNAL(\"upArrowPressed()\"))\n elif event.key() == Qt.Key_Down:\n 
self.emit(SIGNAL(\"downArrowPressed()\"))\n elif event.key() in (Qt.Key_Left, Qt.Key_Backspace) and \\\n self.textCursor().columnNumber() == START_COLUMN:\n # we just want to 'block' here, i.e., do nothing; the user\n # has navigated to the start of the column\n pass\n \n else:\n #self.keyPressEvent(event)\n super(QConsole, self).keyPressEvent(event)\n \n def mousePressEvent(self, event):\n ### this works but now you need to set the cursor \n # on the console initially...\n #self.textCursor().setPosition(100)\n #self.find(\">> \")\n ### you would think there'd be an easier\n # /less hacky way to do this..?\n for i in range(3):\n self.moveCursor(16)\n self.moveCursor(15)\n print \"(mouse clicked)\"\n " }, { "alpha_fraction": 0.4651162922382355, "alphanum_fraction": 0.4937388300895691, "avg_line_length": 30.882352828979492, "blob_id": "5e72667f7b12b039b489dc85e54ef67b00aaea51", "content_id": "6b3ed64eca22485a192707b5d04373c41a4331fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 559, "license_type": "no_license", "max_line_length": 70, "num_lines": 17, "path": "/src/R/HSROC/R/beta.parameter.R", "repo_name": "bwallace/OpenMeta-analyst-", "src_encoding": "UTF-8", "text": "beta.parameter <-\r\nfunction (low, up) \r\n{\r\n if (any(low < 0) | any(low > 1) | any(up < 0) | any(up > \r\n 1)) \r\n stop(\"Range limits must fall within [0, 1]\")\r\n if (all(low < up) == FALSE) \r\n stop(\"minimum argument 'low' must be < maximum argument 'up'\")\r\n mu = (up + low)/2\r\n s = (up - low)/4\r\n a = (-mu * (s^2 + mu^2 - mu))/s^2\r\n b = ((mu - 1) * (s^2 + mu^2 - mu))/s^2\r\n results = mapply(beta.condition, a, b)\r\n rownames(results) = c(\"alpha\", \"beta\")\r\n colnames(results) = 1:length(low)\r\n return(results)\r\n}\r\n" } ]
124
mehrad/solid_presentation
https://github.com/mehrad/solid_presentation
c05c95cfa312e3e2c190d6a7953a78b6c0be7c44
aec4548d02e630e338965224bee70a0f331bbf70
3643ac58060556df1ec41b39688c5517d915748e
refs/heads/master
2022-12-30T07:24:38.633177
2020-09-27T04:27:01
2020-09-27T04:37:04
298,982,963
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.577464759349823, "alphanum_fraction": 0.5985915660858154, "avg_line_length": 26.399999618530273, "blob_id": "2afc49ef0fd84b4c03d126d21579e7b4bca8f54d", "content_id": "b86c76176dbee3352bc2a6216130d200b4dbcc4a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 142, "license_type": "permissive", "max_line_length": 37, "num_lines": 5, "path": "/app/logic.py", "repo_name": "mehrad/solid_presentation", "src_encoding": "UTF-8", "text": "def get_next_field(data):\r\n print(data)\r\n print(data[0])\r\n print(data[0]['next_field_slug'])\r\n return data[0]['next_field_slug']\r\n" }, { "alpha_fraction": 0.27209463715553284, "alphanum_fraction": 0.2776618003845215, "avg_line_length": 27.19607925415039, "blob_id": "e6824378c2c00962864902200ef19976bcc5928d", "content_id": "33cbbf58aaefe2d38cd2f105e3f0c39d64b95d3e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1437, "license_type": "permissive", "max_line_length": 73, "num_lines": 51, "path": "/app/test_logic.py", "repo_name": "mehrad/solid_presentation", "src_encoding": "UTF-8", "text": "import unittest\nimport logic\n\nclass Test_TestIncrementDecrement(unittest.TestCase):\n def setUp(self):\n self.sample_data = [\n {\n 'next_field_slug':'slugA',\n 'conditions':\n [\n {\n 'operand':'>',\n 'answer':'20',\n 'logical_operand':'and'\n }\n ,{\n 'operand':'<=',\n 'answer':'30',\n 'logical_operand':'or'\n }\n ]\n },\n {\n 'next_field_slug':'slugB',\n 'conditions':\n [\n {\n 'operand':'<=',\n 'answer':'20',\n 'logical_operand':'or'\n }\n ]\n },\n {\n 'next_field_slug':'slugC',\n 'conditions':\n [\n {\n 'operand':'>',\n 'answer':'30',\n 'logical_operand':'or'\n }\n ]\n }\n ]\n def test_get_field(self):\n self.assertEqual(logic.get_next_field(self.sample_data), 'slugA')\n\n\nif __name__ == '__main__':\n unittest.main()" } ]
2
bsgmike/PyqtImageBrowser1
https://github.com/bsgmike/PyqtImageBrowser1
27fcac3bea7ac143d9ff84afe0fb77255b1211c3
b359ef40f280395c8958cbb8057113e75df07989
63544f3c306cc3820429f9b88c7b6e765d814390
refs/heads/master
2020-06-26T16:43:38.431317
2016-12-04T07:48:59
2016-12-04T07:48:59
74,548,791
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6363058090209961, "alphanum_fraction": 0.6504530310630798, "avg_line_length": 34.948570251464844, "blob_id": "b6cb3340f499198c5eb213e734690609a82e4e31", "content_id": "16fc93d05ceb46329784cf456d6ff1e0d3131e96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6291, "license_type": "no_license", "max_line_length": 128, "num_lines": 175, "path": "/fileBrowser_1/file_browser.py", "repo_name": "bsgmike/PyqtImageBrowser1", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nimport os\nfrom PyQt4 import QtGui, QtCore\nfrom PyQt4.QtCore import Qt\nfrom PyQt4.QtGui import *\nimport exifread\n#from PyQt4 import QtCore, QtGui\n\n\n#http://stackoverflow.com/questions/24106903/resizing-qpixmap-while-maintaining-aspect-ratio\nclass ImageLabel(QtGui.QLabel):\n def __init__(self, img):\n #QLabel.__init__()\n super(ImageLabel, self).__init__()\n self.setFrameStyle(QFrame.StyledPanel)\n self.pixmap = QPixmap(img)\n\n def paintEvent(self, event):\n size = self.size()\n painter = QtGui.QPainter(self)\n point = QtCore.QPoint(0,0)\n scaledPix = self.pixmap.scaled(size, Qt.KeepAspectRatio, transformMode = Qt.SmoothTransformation)\n # start painting the label from left upper corner\n point.setX((size.width() - scaledPix.width())/2)\n point.setY((size.height() - scaledPix.height())/2)\n #print point.x(), ' ', point.y()\n painter.drawPixmap(point, scaledPix)\n\n def ChangePixmap(self, img):\n self.pixmap = QtGui.QPixmap(img)\n self.repaint() # repaint() will trigger the paintEvent(self, event), this way the new pixmap will be drawn on the label\n\nclass MyButton(QPushButton):\n def __init__(self, text):\n super(MyButton, self).__init__()\n self.setFixedWidth(100)\n self.setFixedHeight(30)\n self.setFont(QtGui.QFont('SansSerif', 12))\n self.setStyleSheet(\"background-color: #FFF096; color: blue\")\n # self.setStyleSheet(\"color: blue\")\n self.setText(text)\n def close(self):\n self\n\n\nclass Browser( QWidget):\n def __init__(self):\n super(Browser, self).__init__()\n\n self.resize(1000, 800)\n self.setWindowTitle(\"File Browser\")\n self.treeView = QTreeView()\n self.fileSystemModel = QFileSystemModel(self.treeView)\n self.fileSystemModel.setReadOnly(False)\n root = self.fileSystemModel.setRootPath(\"C:\\\\Users\\\\mbowley\\\\Pictures\")\n self.treeView.setModel(self.fileSystemModel)\n self.treeView.setRootIndex(root)\n self.treeView.setColumnWidth(0, 200)\n self.treeView.clicked.connect(self.on_treeView_clicked)\n\n\n self.label = QLabel()\n self.pixmap = QPixmap(os.getcwd() + '/eclipse.png')\n self.label.setPixmap(self.pixmap)\n\n self.label2 = ImageLabel(\"python.jpg\")\n\n #Create some buttons\n self.closeButton = MyButton(\"Exit\")\n self.runButton = MyButton(\"Run\")\n self.dirButton = MyButton(\"Set Dir\")\n\n\n # Create textboxes\n self.fileNametextbox = QLineEdit()\n self.fileNametextbox.move(200, 20)\n self.fileNametextbox.resize(20, 180)\n\n self.pathNametextbox = QLineEdit()\n self.pathNametextbox.move(200, 20)\n self.pathNametextbox.resize(20, 180)\n\n # create QLabels\n self.filepathLabel = QLabel()\n self.filepathLabel.setText(\"python.jpg\")\n self.filepathLabel.setFrameStyle(QFrame.Panel | QFrame.Sunken);\n self.filepathLabel.setFixedWidth(200)\n self.filepathLabel.setFixedHeight(40)\n self.filepathLabel.setFont(QtGui.QFont('SansSerif', 14))\n\n self.exifLabel = QLabel()\n self.exifLabel.setText(\"Exif data goes here\")\n self.exifLabel.setFrameStyle(QFrame.Panel | 
QFrame.Sunken);\n self.exifLabel.setFixedWidth(400)\n self.exifLabel.setFixedHeight(240)\n self.exifLabel.setFont(QtGui.QFont('SansSerif', 8))\n\n # create textbox for tags\n self.tagtextBox = QTextEdit()\n self.tagtextBox.setText(\"Exif data goes here\")\n self.tagtextBox.setFixedWidth(500)\n self.tagtextBox.setFixedHeight(240)\n self.tagtextBox.setFont(QtGui.QFont('SansSerif', 8))\n\n\n LeftPanelLayout = QHBoxLayout()\n RightPanelLayout = QVBoxLayout()\n TopLevelPanelLayout = QHBoxLayout()\n TopLevelLayout = QVBoxLayout()\n ButtonBar = QHBoxLayout()\n\n #put the file browser into the left panel\n LeftPanelLayout.addWidget(self.treeView)\n\n #add the textbox and picture into the right hand panel\n RightPanelLayout.addWidget(self.fileNametextbox)\n RightPanelLayout.addWidget(self.pathNametextbox)\n RightPanelLayout.addWidget(self.filepathLabel)\n # RightPanelLayout.addWidget(self.exifLabel)\n RightPanelLayout.addWidget(self.tagtextBox)\n RightPanelLayout.addWidget(self.label2)\n\n # add the buttons to the button bar\n ButtonBar.addWidget(self.closeButton)\n ButtonBar.addWidget(self.runButton)\n ButtonBar.addWidget(self.dirButton)\n ButtonBar.setAlignment(Qt.AlignLeft)\n\n #Add the left and right layouts into the top level layout\n TopLevelPanelLayout.addLayout(LeftPanelLayout)\n TopLevelPanelLayout.addLayout(RightPanelLayout)\n\n # add the panel layout and the button bar into the top level layout\n TopLevelLayout.addLayout(TopLevelPanelLayout)\n TopLevelLayout.addLayout(ButtonBar)\n self.setLayout(TopLevelLayout)\n\n self.closeButton.clicked.connect(self.close)\n\n def on_treeView_clicked(self, index):\n indexItem = self.fileSystemModel.index(index.row(), 0, index.parent())\n\n fileName = self.fileSystemModel.fileName(indexItem)\n filePath = self.fileSystemModel.filePath(indexItem)\n\n self.fileNametextbox.setText(fileName)\n self.pathNametextbox.setText(filePath)\n self.pixmap = QPixmap(filePath)\n\n self.filepathLabel.setText(fileName)\n #self.label.setPixmap(self.pixmap)\n #self.label.resize(640,480)\n\n self.label2.ChangePixmap(filePath)\n f = open(filePath, 'rb')\n tags = exifread.process_file(f)\n f.close()\n\n self.exifLabel.setText(\"\")\n tagStr = \"\"\n for tag in tags.keys():\n if tag not in ('JPEGThumbnail', 'TIFFThumbnail', 'Filename', 'EXIF MakerNote'):\n # print\n # \"Key: %s, value %s\" % (tag, tags[tag])\n tagStr = tagStr + \"Key: %s, value %s\" % (tag, tags[tag])\n tagStr = tagStr + \"\\r\"\n self.tagtextBox.setText(tagStr)\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n main = Browser()\n main.show()\nsys.exit(app.exec_())\n" } ]
1
LuigiChiricosta/Development-of-next-generation-sequencing-tools-for-the-diagnosis-of-neurodevelopmental-disorders
https://github.com/LuigiChiricosta/Development-of-next-generation-sequencing-tools-for-the-diagnosis-of-neurodevelopmental-disorders
717130442bcb6479effe0f26f038ad7ca46b3ec2
4dbafd807f9a9a083f6d29e703292eb3df1cad18
472dab863cbd86469efa28a4e8f2966aa3eeb54c
refs/heads/master
2020-03-09T18:58:00.353593
2018-05-07T13:28:40
2018-05-07T13:28:40
128,945,803
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6114649772644043, "alphanum_fraction": 0.6301183104515076, "avg_line_length": 38.9636344909668, "blob_id": "db593d657d1dc48e54c04afa1d3a9402397a9fa5", "content_id": "bbf1b8f3ecfe00afd57faeac3ac77645ef8ef21c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2198, "license_type": "no_license", "max_line_length": 166, "num_lines": 55, "path": "/reduceGnomAD.py", "repo_name": "LuigiChiricosta/Development-of-next-generation-sequencing-tools-for-the-diagnosis-of-neurodevelopmental-disorders", "src_encoding": "UTF-8", "text": "def get_options():\n import optparse\n desc = 'Compact genome and exome GnomAD reduced datasets'\n parser = optparse.OptionParser(\"help: -f file1 -g file2\", description=desc)\n parser.add_option(\"-f\", \"--file1\", action=\"store\", type='string', dest='f1', help='First file path')\n parser.add_option('-g', \"--file2\", action='store', type='string', dest='f2', help='Second file path')\n (options, args) = parser.parse_args()\n\n\treturn args,options.f1,options.f2\n\ndef readFile(variantDictionary, filePath):\n\tf=0\n\twith open(filePath, \"r\") as fileName:\n\t\tfor line in fileName:\n\t\t\tif f<0:\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tf += 1\n\t\t\t\tparameters = line.replace(\"\\n\", \"\").split(\"\\t\")\n\t\t\t\tID = parameters[0].split(\":\")\n\t\t\t\tif len(ID) != 1:\n\t\t\t\t\talternatives = ID[3]\n\t\t\t\t\tACSplitted = parameters[1].split(\",\")\n\t\t\t\t\tHomSplitted = parameters[2].split(\",\")\n\t\t\t\t\tHemiSplitted = parameters[3].split(\",\")\n\t\t\t\t\talternativesSplitted = alternatives.split(\",\")\n\t\t\t\t\tfor a in range(len(alternativesSplitted)):\n\t\t\t\t\t\talternative = alternativesSplitted[a]\n\t\t\t\t\t\tkey = ID[0],ID[1],ID[2],alternative\n\t\t\t\t\t\t#print parameters, key\n\t\t\t\t\t\tAC = int(ACSplitted[a]) if a < len(ACSplitted) and ACSplitted[a] != \"-\" and ACSplitted[a] != \".\" else 0\n\t\t\t\t\t\tHom = int(HomSplitted[a]) if a < len(HomSplitted) and HomSplitted[a] != \"-\" and HomSplitted[a] != \".\" else 0\n\t\t\t\t\t\tHemi = int(HemiSplitted[a]) if a < len(HemiSplitted) and HemiSplitted[a] != \"-\" and HemiSplitted[a] != \".\" else 0\n\t\t\t\t\t\tif key not in variantDictionary:\n\t\t\t\t\t\t\tvariantDictionary[key] = [AC, Hom, Hemi]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tvariantDictionary[key] = [AC+variantDictionary[key][0], Hom+variantDictionary[key][1], Hemi+variantDictionary[key][2]]\n\nif __name__ == '__main__':\n\timport sys\n\n\targs = sys.argv;\n\targs, file1, file2 = get_options()\n\n\tvariantDictionary = {}\n\t\t\n\tif file1 != None and file2 != None:\n\t\treadFile(variantDictionary, file1)\n\t\treadFile(variantDictionary, file2)\n\telse:\n\t\tprint \"help: -f file1 -g file2\t\"\n\n\tprint \"ID\\tAC\\tHom\\tHemi\"\n\tfor i in variantDictionary:\n\t\tprint str(i[0])+\":\"+str(i[1])+\":\"+str(i[2])+\":\"+str(i[3])+\"\\t\"+str(variantDictionary[i][0])+\"\\t\"+str(variantDictionary[i][1])+\"\\t\"+str(variantDictionary[i][2])+\"\\t\"\n" }, { "alpha_fraction": 0.8600000143051147, "alphanum_fraction": 0.8600000143051147, "avg_line_length": 99, "blob_id": "7b30e1f2f2b41ef9c7efff9b493f4fa59be3ceb7", "content_id": "aa8cc485fa007d992e97d5a885fab4246c8ac761", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 100, "license_type": "no_license", "max_line_length": 99, "num_lines": 1, "path": "/README.md", "repo_name": 
"LuigiChiricosta/Development-of-next-generation-sequencing-tools-for-the-diagnosis-of-neurodevelopmental-disorders", "src_encoding": "UTF-8", "text": "# Development-of-next-generation-sequencing-tools-for-the-diagnosis-of-neurodevelopmental-disorders\n" }, { "alpha_fraction": 0.6045566201210022, "alphanum_fraction": 0.613950252532959, "avg_line_length": 37.86920928955078, "blob_id": "a0e1f5e73c252fc0de18586d26f13c50a234b274", "content_id": "2bd239eec2a79b878eb5d2308026ddfdb2127926", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 14265, "license_type": "no_license", "max_line_length": 644, "num_lines": 367, "path": "/pipeline_Luigi_Chiricosta.sh", "repo_name": "LuigiChiricosta/Development-of-next-generation-sequencing-tools-for-the-diagnosis-of-neurodevelopmental-disorders", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nreset\n\necho \"****************************************\"\necho \"* Pipeline to annotate variants *\"\necho \"* *\"\necho \"* Neurodevelopmental disorders project *\"\necho \"* *\"\necho \"* Author: Luigi Chiricosta *\"\necho \"****************************************\"\necho\necho\nread -r -p \"Do you want run the complete pipeline? [Y/N] \" completePipeline\n\nif [ \"$1\" != \"\" ]; then\n\tproject=$1\nelse\n\tproject=\"ASD_Project\"\n\techo \"I use the default project name.\"\nfi\n\necho \"Project name: $project\"\necho\n\n#initialize path variables\nhome=\"$HOME\"\ndesktop=\"$home/Desktop\"\nannovarToolPath=\"$desktop/Software/annovar\"\nworkingPath=\"$desktop/Pipeline_Luigi_Chiricosta\"\n#VCFInputPath=\"$workingPath/VCFfiles/12-13_04_2018\"\nVCFInputPath=\"$desktop/Exoms_data/SNP/Vcf\"\npath=\"$workingPath/$project\"\nannovarOutput=\"$path/annovarOutput\"\nmergeFileName=\"merged_output_vcf.vcf.gz\"\noutputAnnovarFileName=\"merg_out\"\nreducedGnomADRelease=\"$desktop/Pipeline_Luigi_Chiricosta/GnomAD releases/reduced_gnomad_release.txt\" #extra file\n#done\n\nif [ $(dpkg -l | grep -E '^ii' | grep -w realpath | wc -l) -eq 0 ]; then\n echo \"Realpath command is not in the system... installing...\"\n sudo apt-get install realpath\nfi\n\necho \"> Project folder path: \"$(realpath $path)\n\nif [ \"$completePipeline\" = \"N\" ]; then\n echo\n echo\n read -r -p \"Do you want to prepare the input vcf? [Y/N] \" pipelineNextStep\nelse\n\techo \"Complete option confirmed.\"\n\techo\nfi\n\nif [[ \"$completePipeline\" != \"N\" ]] || [[ \"$pipelineNextStep\" != \"N\" ]]; then\n\tif [ $(dpkg -l | grep -E '^ii' | grep -w tabix | wc -l) -eq 0 ]; then\n\t\techo \"Tabix command is not in the system... installing...\"\n\t\tsudo apt-get install tabix\n\tfi\n\n\tif [[ -e $path ]] ; then\n\t\ti=1\n\t \twhile [[ -e $path-$i ]] ; do\n\t \tlet i++\n\t\tdone\n\t\n\t\tpath=$path-$i\n\t echo \"> Redefined project folder path: \"$(realpath $path)\n\tfi\n\t\n\tmkdir \"$path\"\n\t\n\tannovarOutput=\"$path/annovarOutput\" #rewrite annovarOutput if create a new project folder\n echo \"> Annovar Output: $annovarOutput <\"\n mkdir $annovarOutput\n\n\t#TODO use input folder for vcf files\n\techo \"> VCF input files in: \"$(realpath $VCFInputPath)\n\t\n\t#if vcf does not have its tbi, do it\n\tfor vcf in $VCFInputPath/*.vcf.gz; do\n\t\tname=$(basename \"$vcf\")\n\t\tif [[ ! -f \"$vcf.tbi\" ]]; then\n\t\t\techo -e \"$name is not alreay indexed, i am doing it... \\c\"\n\t\t\ttabix -p vcf \"$vcf\"\n\t\t\techo \"done.\"\n\t\tfi\n\tdone\n\t\n\techo\n\techo -e \"Merging the vcf files... 
\\c\"\n\tvcf-merge $VCFInputPath/*.vcf.gz 2>/dev/null | bgzip -c > $annovarOutput/$mergeFileName\n\techo \"done.\"\n\techo \"Annoting with annovar... \"\n\tperl $annovarToolPath/table_annovar.pl $annovarOutput/$mergeFileName $annovarToolPath/humandb/ -buildver hg19 -vcfinput -out $annovarOutput/$outputAnnovarFileName -remove -protocol refGene,dbnsfp33a,intervar_20170202,clinvar_20170905,avsnp150,gnomad_genome,gnomad_exome,cosmic70 -operation g,f,f,f,f,f,f,f -nastring .\n\techo\n\techo \"Completed.\"\n\techo\nfi\n\t\nif [ \"$completePipeline\" = \"N\" ]; then\n\techo\n\techo\n\tread -r -p \"Do you want to build the new table? [Y/N] \" pipelineNextStep\nfi\n\nif [[ \"$completePipeline\" != \"N\" ]] || [[ \"$pipelineNextStep\" != \"N\" ]]; then\n\techo \"Building new table...\"\n\tmultiannoPath=\"$annovarOutput/$outputAnnovarFileName.hg19_multianno\"\n\t\n\t#Catch the sort of the columns\n\tSPECIAL_FIELD=\"PATIENTS_LIST\"\n\tvcfInfo=$(grep -w \"#CHROM\" \"$multiannoPath.vcf\")\n\toriginalCompleteSort=$(head -n1 \"$multiannoPath.txt\" | sed \"s/Otherinfo/Otherinfo1\\tOtherinfo2\\tOtherinfo3\\t$vcfInfo\\t/g\")\n\tnewSort=$(echo -e \"Chr\\tStart\\tEnd\\tRef\\tAlt\\tFunc.refGene\\tGene.refGene\\tGeneDetail.refGene\\tExonicFunc.refGene\\tAAChange.refGene\\tills_in_db\\tpatients_id_list\\tavsnp150\\tcosmic70\\tgnomAD_exome_ALL\\tgnomAD_genome_ALL\\tAC_gnomAD\\tHom_gnomAD\\tHemi_gnomAD\\tInterVar(automated)\\tCLINSIG\\tCLNACC\\tSIFT_pred\\tPolyphen2_HDIV_pred\\tPolyphen2_HVAR_pred\\tLRT_pred\\tMutationTaster_pred\\tMutationAssessor_pred\\tFATHMM_pred\\tPROVEAN_pred\\tMetaSVM_pred\\tMetaLR_pred\\tM-CAP_pred\\tCADD_phred\\tfathmm-MKL_coding_pred\\tGERP++_RS\\tInterpro_domain\\tINFO\\tFORMAT\\t$SPECIAL_FIELD\") #insert column PATIENTS_LIST to have the list of patients starting from that point\n\tfirstColumn=\"Chr\\tPOS\\tREF\\tALT\";\n\t\n\t#Retrevial the patients\n\tpatientsList=$(echo -e \"$vcfInfo\" | awk -F '\\t' '{for(i=10;i<=NF;i++) printf (\"%s%s\", $i, (i!=NF) ? \"\\t\" : \"\")}')\n\tpatients=$(echo -e \"$patientsList\" | awk -F '\\t' '{print split($0, patients, \"\\t\")}')\n\techo \"Found $patients patients.\"\n\t\n\t#Adjust the list if you want the patients too\n\tpatientStartingList=${newSort#*$SPECIAL_FIELD}\n\tpatientStartingPosition=$(((${#newSort}-${#patientStartingList}-${#SPECIAL_FIELD})+1))\n\tif [[ $patientStartingPosition -gt -1 ]]; then\n\t\tstartingFieldPatientsList=$(echo -e \"${newSort:0:$patientStartingPosition}\" | awk -F \"\\t\" '{print NF}')\n\t\tnewSort=$(echo -e \"$newSort\" | sed \"s/$SPECIAL_FIELD/$patientsList/g\")\n\tfi\n\t\n\tcolumnFORMAT=$(grep -e ^[^#] \"$multiannoPath.vcf\" | awk -F '\\t' '{valuesFORMATLength=split($9, valuesFORMAT, \":\"); for(i=1;i<=valuesFORMATLength;i++) print valuesFORMAT[i]}' | sort | uniq | awk 'BEGIN{FORMAT=\"\"}{if(FORMAT==\"\")FORMAT=$0; else FORMAT=FORMAT\":\"$0;}END{print FORMAT}')\n\t\n\tcp \"$multiannoPath.txt\" \"$multiannoPath.complete.txt\"\n\t\n\t#add Extra columns\n\tif [[ \"$newSort\" == *\"AC_gnomAD\"* || \"$newSort\" == *\"Hom_gnomAD\"* || \"$newSort\" == *\"Hemi_gnomAD\"* ]]; then\n\t\techo \"Found extra GnomAD columns\"\n\t\n\t\ttmp=\"$multiannoPath.txt.tmp\"\n\t\n\t\tawk -F '\\t' -v originalCompleteSort=\"$originalCompleteSort\" -v firstColumn=\"$firstColumn\" '\n\t\tBEGIN{\n\t \t#elaborate the names row for each column\n\t\t idxLength=split(firstColumn, idxArray, \"\\t\");\n\t\t\n\t\t #elaborate the idx column (first one)\n\t\t for(i=1; i<=idxLength; i++)\n\t\t printf(\"%s%s\", idxArray[i], (i!=idxLength) ? 
\":\" : \"\\t\");\n\t \t\n\t\t originalSortSplittedLength = split(originalCompleteSort, originalSortSplitted, \"\\t\");\n\t \tfor(i=1;i<=originalSortSplittedLength; i++)\n\t \toriginalSortDictionary[originalSortSplitted[i]]=i\n\t\t}\n\t\t{\n\t\t if(NR==1)\n\t\t print $0\n\t \n\t\t #starting with a new row, reset the variables\n\t\t if(NR!=1)\n\t\t {\n\t\t #elaborate idx column (first one)\n\t\t for(i=1; i<=idxLength; i++)\n\t\t printf(\"%s%s\", $originalSortDictionary[idxArray[i]], (i!=idxLength) ? \":\" : \"\\t\"$0\"\\n\");\n\t\t\n\t\t }\n\t\t}' \"$multiannoPath.txt\" > \"$tmp\"\n\t\trm \"$multiannoPath.complete.txt\"\n\t\tmv \"$tmp\" \"$multiannoPath.complete.txt\"\n\t\t\n\t\tformat=\"0\"\n\t var1=$(echo -e \"$originalCompleteSort\" | awk -F \"\\t\" '{print NF+1}'); \n\t for i in $(seq 2 \"$var1\"); do\n\t format=$format\",1.$i\"; \n\t done\n\t\n\t var2=$(head -n1 \"$reducedGnomADRelease\" | awk -F \"\\t\" '{print NF}'); \n\t for i in $(seq 2 \"$var2\"); do\n\t format=$format\",2.$i\";\n\t done\n\t\t\n\t\techo -e \"I am joining it... \\c\"\n\t\t\n\t\theader1=$(head -n1 \"$multiannoPath.complete.txt\")\n\t\theader2=$(head -n1 \"$reducedGnomADRelease\" | cut -f2- -d$'\\t')\n\t\techo -e \"$header1\\t$header2\" | awk -F '\\t' '{for(i=2; i<=NF; i++) printf(\"%s%s\", $i, (i!=NF) ? \"\\t\" : \"\\n\");}' > \"$tmp\"\n\t\t\n\t\tjoin -t $'\\t' -a1 -e \"-\" -o \"$format\" <(awk -F '\\t' '{if(NR!=1)print $0}' \"$multiannoPath.complete.txt\" | sort -k1,1) <(awk -F '\\t' '{if(NR!=1){print $0}}' \"$reducedGnomADRelease\" | sort -k1,1) | awk -F '\\t' '{for(i=2; i<=NF; i++) printf(\"%s%s\", $i, (i!=NF) ? \"\\t\" : \"\\n\");}' | vcf-sort -c >> \"$tmp\" 2>/dev/null\n\t\t\n\t\n\t\tif [[ \"$newSort\" == *\"AC_gnomAD\"* ]]; then\n\t\t\toriginalCompleteSort=\"$originalCompleteSort\\tAC_gnomAD\"\n\t\tfi\n\t\tif [[ \"$newSort\" == *\"Hom_gnomAD\"* ]]; then\n\t originalCompleteSort=\"$originalCompleteSort\\tHom_gnomAD\"\n\t fi\n\t\tif [[ \"$newSort\" == *\"Hemi_gnomAD\"* ]]; then\n\t originalCompleteSort=\"$originalCompleteSort\\tHemi_gnomAD\"\n\t fi\n\t\techo \"done.\"\n\t\t\n\t\trm \"$multiannoPath.complete.txt\"\n\t\tmv \"$tmp\" \"$multiannoPath.complete.txt\"\n\tfi\n\t\n\techo \"Building the new table... \\c\"\n\t\n\tawk -F '\\t' -v originalCompleteSort=\"$originalCompleteSort\" -v newSort=\"$newSort\" -v firstColumn=\"$firstColumn\" -v patients=\"$patients\" -v firstPatientsField=\"$startingFieldPatientsList\" -v columnFORMAT=\"$columnFORMAT\" 'BEGIN{\n\t\t#elaborate the names row for each column\n\t\t#elaborate the idx column (first one)\n\t idxLength=split(firstColumn, idxArray, \"\\t\");\n\t for(i=1; i<=idxLength; i++)\n\t printf(\"%s%s\", idxArray[i], (i!=idxLength) ? \":\" : \"\\t\");\n\t\n\t\toriginalSortSplittedLength = split(originalCompleteSort, originalSortSplitted, \"\\t\");\n\t\tfor(i=1;i<=originalSortSplittedLength; i++)\n\t\t\toriginalSortDictionary[originalSortSplitted[i]]=i\n\t\tnewSortSplittedLength = split(newSort, newSortSplitted, \"\\t\");\n\t\tfor(i=1; i<=newSortSplittedLength; i++)\n\t \tprintf(\"%s%s\", newSortSplitted[i], (i!=newSortSplittedLength) ? \"\\t\" : \"\\n\");\n\t\tlastPatientsField=firstPatientsField+patients\n\t}\n\t{\n\t\t#starting with a new row, reset the variables\n\t\tif(NR!=1)\n\t\t{\t\n\t\t\tills=0\n\t\t\tillsList=\"\"\n\t\n\t\t\t#elaborate idx column (first one)\n\t for(i=1; i<=idxLength; i++)\n\t printf(\"%s%s\", $originalSortDictionary[idxArray[i]], (i!=idxLength) ? 
\":\" : \"\\t\");\n\t\n\t\t\t#elaborate all the other columns\n\t\t\tfinalArray=\"\"\n\t\t\tfor(i=1; i<=newSortSplittedLength; i++)\n\t\t {\n\t\t\t\t#check if the actual value is a special column or not\n\t\t if(originalSortDictionary[newSortSplitted[i]] == \"\") #the actual column is special\n\t\t\t\t{\n\t\t\t\t\tif(newSortSplitted[i] == \"ills_in_db\")\n\t\t\t\t\t\tfinalArray=finalArray\"ills_in_db\\t\"\n\t\t\t\t\telse if(newSortSplitted[i] == \"patients_id_list\")\n\t finalArray=finalArray\"patients_id_list\\t\"\n\t\t\t\t\t\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tfinalArray=finalArray\"COLUMN_ERROR\\t\"\n\t\t\t printf \"COLUMN_ERROR(\"newSortSplitted[i]\")\\t\"\n\t\t\t\t\t}\n\t\t }\n\t\t\t\telse #the actual column is not special\n\t\t\t\t{\n\t\t\t\t\tif(firstPatientsField == -1 || i<firstPatientsField || i>=lastPatientsField)\n\t\t\t\t\t{\n\t\t\t\t\t\tif(newSortSplitted[i] == \"FORMAT\")\n\t\t\t\t\t\t\tfinalArray=finalArray\"\"columnFORMAT\"\\t\"\n\t\t\t\t\t\telse\n\t\t\t\t\t\t\tfinalArray=finalArray\"\"$originalSortDictionary[newSortSplitted[i]]\"\\t\"\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\t#check if is a patient column and its status\n\t\t\t\t\t\tif(firstPatientsField != -1) #no one patient could be present\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tif(i>=firstPatientsField && i<lastPatientsField)\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t#print \"patient (\"i\"): \"$originalSortDictionary[newSortSplitted[i]]\n\t\t\t\t\t\t\t\tif($originalSortDictionary[newSortSplitted[i]]!=\".\")\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t#print $originalSortDictionary[\"FORMAT\"]\n\t\t\t\t\t\t\t\t\tcolumnFORMATSplittedLength = split($originalSortDictionary[\"FORMAT\"], columnFORMATSplitted, \":\")\n\t\t\t\t\t\t\t\t for(j=1;j<=columnFORMATSplittedLength; j++)\n\t\t\t\t\t\t\t\t\t\tcolumnFORMATDictionary[columnFORMATSplitted[j]]=j\n\t\t\t\t\t\t\t\t\tsplit($originalSortDictionary[newSortSplitted[i]], valuesFORMATSplitted, \":\")\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t#recostruct the new FORMAT column sort\n\t\t\t\t\t\t\t\t\tnewFORMATColumn = \"\"\n\t\t\t\t\t\t\t\t\tcolumnFORMATSplittedLength = split(columnFORMAT, columnFORMATSplitted, \":\")\n\t\t\t\t\t\t\t\t\tfor(j=1; j<=columnFORMATSplittedLength;j++)\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tnewFORMATColumn=newFORMATColumn\"\"valuesFORMATSplitted[columnFORMATDictionary[columnFORMATSplitted[j]]]\n\t\t\t\t\t\t\t\t\t\tif(j<columnFORMATSplittedLength)\n\t\t\t\t\t\t\t\t\t\t\tnewFORMATColumn=newFORMATColumn\":\"\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\tfinalArray=finalArray\"\"newFORMATColumn\"\\t\"\n\t\n\t\t\t\t\t\t\t\t\t#specific format details for patients with variant\n\t\t\t\t\t\t\t\t\tGQ = valuesFORMATSplitted[columnFORMATDictionary[\"GQ\"]]\n\t\t\t\t\t\t\t\t\tDP = valuesFORMATSplitted[columnFORMATDictionary[\"DP\"]]\n\t\t\t\t\t\t\t\t\tAF = valuesFORMATSplitted[columnFORMATDictionary[\"AF\"]]\n\t\t\t\t\t\t\t\t\t#print newSortSplitted[i]\" : \"GQ\"(\"columnFORMATDictionary[\"GQ\"]\") - \"DP\"(\"columnFORMATDictionary[\"DP\"]\") -\"AF\"(\"columnFORMATDictionary[\"AF\"]\")\"\n\t\t\t\n\t \t\t ills=ills+1\n\t \t\t illsList=illsList\"\"newSortSplitted[i]\"(GQ: \"GQ\", DP: \"DP\", AF: \"AF\"); \"\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\telse\n\t\t\t\t\t\t\t\t\tfinalArray=finalArray\".\\t\"\n\t\t\t\t\t\t\t}\n\t\t\t\n\t\t\t\t\t\t\t#filter to not visualize the patients of common variants\n\t\t\t\t\t\t\tif(i==lastPatientsField-1 && ills>5)\n\t\t\t\t\t\t\t\tillsList=\"-\"\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\t#replace the special 
columns\n\t\t\tgsub(\"ills_in_db\", ills\"/\"patients, finalArray)\n\t\t\tgsub(\"patients_id_list\", illsList, finalArray)\n\t\t\t\n\t\t\t#print the final record\n\t\t\tprint finalArray\n\t\t}\n\t}' \"$multiannoPath.complete.txt\" | awk -F \"\\t\" '{if(NR==1){gsub(\"CLNACC\", \"ClinVar_ID\", $0); gsub(\"AC_gnomAD\", \"AC\", $0); gsub(\"Hom_gnomAD\", \"Hom\", $0); gsub(\"Hemi_gnomAD\", \"Hem\", $0)}; print $0}' | uniq > \"$path/$project.xls\" #uniq command is necessary because in txt annovar output some rows are duplicated maybe due to more than one original reference allele\n\n\techo \"done.\"\n\techo\n\techo \"> Final output in: $path/$project.xls\"\n\techo\n\techo \"Completed.\"\nfi\n\nif [ \"$completePipeline\" = \"N\" ]; then\n echo\n echo\n read -r -p \"Do you want to attach the transcript information? [Y/N] \" pipelineNextStep\nfi\n\nif [[ \"$completePipeline\" != \"N\" ]] || [[ \"$pipelineNextStep\" != \"N\" ]]; then\n echo \"Attaching transcript information...\"\n\t\n\tif [ $(dpkg -l | grep -E '^ii' | grep -w curl | wc -l) -eq 0 ]; then\n \techo \"Curl command is not in the system... installing...\"\n \t\tsudo apt-get install curl\n\tfi\n\t\n\ttmp=\"$path/$project.plus.xls\"; \n\t> $tmp;\n \t\n\tlines=$(cat \"$path/$project.xls\" | wc -l)\n\tactualLine=0\n\twhile read in; do \n\t if [[ \"$in\" == *\"Chr:POS:REF:ALT\"* ]]; then \n\t\t\techo -e \"Mutalyzer\\t$in\" >> \"$tmp\"\n\t\telse\n\t\t\tactualLine=$((actualLine+1))\n\t\t\tid=$(awk -F '\\t' '{split($1, id, \":\"); multiplesLength=split(id[4], multiples, \",\"); if(multiplesLength==1) print id[1]\":g.\"id[2]\"\"id[3]\">\"id[4]; else print \"\"}' <<< \"$in\"); \n\t\t\tif [ \"$id\" = \"\" ]; then\n\t\t\t\ttranscript=\"-\"\n\t\t\telse\n\t\t transcript=$(curl 'https://mutalyzer.nl/json/numberConversion?build=hg19&variant='$id'' 2>/dev/null | sed 's/\\[//g' | sed 's/\\]//g' | sed 's/\\\"//g')\n\t\tfi\n\n\t echo -e \"$transcript\\t$in\" >> \"$tmp\"; \n\t\t\techo -ne \"Progress: $actualLine/$lines\\r\"\n\t fi; \n\tdone < \"$path/$project.xls\"\n\t\n\techo\n\techo \"done.\"\nfi\n\necho\necho\necho \"Pipeline finished.\"\n" } ]
3
chenhongbiao/My_Web_Spider
https://github.com/chenhongbiao/My_Web_Spider
7c366ff9e8b58351ef01675da8d7d594e5f5a302
ecf5360678e09da599160e504cd7ac2f23b080db
9dd069fc7f31ca1a506f41317a78dcf5ccbebace
refs/heads/master
2021-01-19T04:07:40.785679
2016-08-04T14:47:32
2016-08-04T14:47:32
64,398,626
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7149876952171326, "alphanum_fraction": 0.7285012006759644, "avg_line_length": 41.842105865478516, "blob_id": "4e11de79ae5243f2b6c20ed5308814559277eebc", "content_id": "dd7b4507d7020e97167724168b039b7ad96fb565", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 814, "license_type": "no_license", "max_line_length": 101, "num_lines": 19, "path": "/Web Scraping with Python/Chapter6/1-getText/1-getText/_1_getText.py", "repo_name": "chenhongbiao/My_Web_Spider", "src_encoding": "UTF-8", "text": "from urllib.request import urlopen\n\n#textPage = urlopen(\"http://www.pythonscraping.com/pages/warandpeace/chapter1.txt\")\ntextPage = urlopen(\"http://www.pythonscraping.com/pages/warandpeace/chapter1-ru.txt\")\n#print(textPage)\nprint(str(textPage.read()),\"utf-8\")\n\n#from urllib.request import urlopen\n#from bs4 import BeautifulSoup\n\n#html = urlopen(\"http://en.wikipedia.org/wiki/Python_(programming_language)\")\n#bsObj = BeautifulSoup(html,\"html.parser\")\n#content = bsObj.find(\"div\", {\"id\":\"mw-content-text\"}).get_text()\n#content = bytes(content, \"UTF-8\")\n#content = content.decode(\"UTF-8\")\n#print(content)\n#print(bsObj) #'gbk' codec can't encode character '\\xa0' in position 9173: illegal multibyte sequence\n #because this terminal is presenting character as gbk decode\n#print(bsObj.get_text())\n" }, { "alpha_fraction": 0.752017617225647, "alphanum_fraction": 0.7622890472412109, "avg_line_length": 42.935482025146484, "blob_id": "68c99a4508f4e5286d28c604c1ac5a43c1af1206", "content_id": "df4d08861e5b427ba1576376d9bbe0883fcbcf00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1363, "license_type": "no_license", "max_line_length": 124, "num_lines": 31, "path": "/Web Scraping with Python/Chapter10/1-seleniumBasic/1-seleniumBasic/_1_seleniumBasic.py", "repo_name": "chenhongbiao/My_Web_Spider", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport time\n\n#don't trick by the path like: D:\\Internet-IE\\phantomjs-2.1.1-windows\\bin - repalce '\\' to '/'\ndriver = webdriver.PhantomJS(executable_path=\"D:/Internet-IE/phantomjs-2.1.1-windows/bin/phantomjs\")\n#and you need to add phantomjs.exe path in your system OS PATH and you can test it in your cmd: phantomjs\ndriver.get(\"http://pythonscraping.com/pages/javascript/ajaxDemo.html\") # driver load the page and wait...\n#time.sleep(1)\ntime.sleep(3)\n#print(driver)\n#print(driver.find_element_by_id(\"content\").text)\npageSource = driver.page_source #get the source of the current page\nbsObj = BeautifulSoup(pageSource,\"html.parser\")\nprint(bsObj.find(id=\"content\").get_text())\ndriver.close()\n\n#from selenium import webdriver\n#from selenium.webdriver.common.by import By\n#from selenium.webdriver.support.ui import WebDriverWait\n#from selenium.webdriver.support import expected_conditions as EC\n\n#driver = webdriver.PhantomJS(executable_path=\"D:/Internet-IE/phantomjs-2.1.1-windows/bin/phantomjs\")\n#driver.get(\"http://pythonscraping.com/pages/javascript/ajaxDemo.html\")\n\n#try:\n #element = WebDriverWait(driver=driver,timeout=10).until(EC.presence_of_all_elements_located( (By.ID, \"loadedButton\") ))\n #print(element)\n#finally:\n #print(driver.find_element_by_id(\"content\").text)\n #driver.close()\n\n" }, { "alpha_fraction": 0.6969432234764099, "alphanum_fraction": 0.710917055606842, "avg_line_length": 38.517242431640625, "blob_id": 
"8367d20948807e3510781b81e5f30f7295e7aa4f", "content_id": "17002055f30348403fc6bce0ce6c2db02ea135f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1145, "license_type": "no_license", "max_line_length": 145, "num_lines": 29, "path": "/Web Scraping with Python/Chapter4/4-decodeJson/4-decodeJson/_4_decodeJson.py", "repo_name": "chenhongbiao/My_Web_Spider", "src_encoding": "UTF-8", "text": "#The HTTP API takes GET requests in the following schema:\n#freegeoip.net/{format}/{IP_or_hostname}\n#Supported formats are: csv, xml, json and jsonp. \n#If no IP or hostname is provided, then your own IP is looked up.\nfrom urllib.request import urlopen\nimport json\n\nojson = urlopen(\"https://freegeoip.net/json/github.com\")\nprint(ojson)\nprint(ojson.read())\n\ndef getCountry(ipAddress):\n response = urlopen(\"http://freegeoip.net/json/\"+ipAddress).read().decode(\"utf-8\")\n #want to decode the bytes which use the \"xx\" encoding method\n responseJson = json.loads(response)\n #return responseJson.get(\"country_name\")\n return responseJson[\"country_name\"]\n #in Python dict you can use method .get() or just responseJson[\"country_name\"]\n\nprint(getCountry(\"github.com\"))\nprint(getCountry(\"50.78.253.58\"))\n\njsonString = '{\"arrayOfNums\":[{\"number\":0},{\"number\":1},{\"number\":2}], \"arrayOfFruits\":[{\"fruit\":\"apple\"},{\"fruit\":\"banana\"}, {\"fruit\":\"pear\"}]}'\njsobj = json.loads(jsonString)\n\nprint(jsobj.get(\"arrayOfNums\"))\nprint(jsobj[\"arrayOfNums\"][1])\nprint(jsobj.get(\"arrayOfFruits\")[2][\"fruit\"])\nprint(jsobj.get(\"arrayOfFruits\")[2].get(\"fruit\"))" }, { "alpha_fraction": 0.6619099974632263, "alphanum_fraction": 0.7058177590370178, "avg_line_length": 38.65217208862305, "blob_id": "98d6d3e67251f3b200c725ce5371da5204ff7c54", "content_id": "c52e7da4243c4200a6f2a4713daca161fafcb827", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 911, "license_type": "no_license", "max_line_length": 122, "num_lines": 23, "path": "/Web Scraping with Python/Chapter5/1-getPageMedia/1-getPageMedia/_1_getPageMedia.py", "repo_name": "chenhongbiao/My_Web_Spider", "src_encoding": "UTF-8", "text": "from urllib.request import urlopen\nfrom urllib.request import urlretrieve\nfrom bs4 import BeautifulSoup\n\n#http://space.bilibili.com/1950746/#!/index\n#<div class=\"h-avatar\">\n#<img src=\"http://i2.hdslb.com/bfs/face/24032cad1ec7db242fda01061a4f8b3b998d170c.jpg\" id=\"h-avatar\" come-on-click-me!!=\"\">\n#</div>\n\n#def getAvater(userid):\n #html = urlopen(\"http://space.bilibili.com/\"+userid+\"/#!/index\")\n #bsObj = BeautifulSoup(html.read(),\"html.parser\")\n #imgtag = bsObj.find(\"img\", id = \"h-avater\")\n #cannot find this tag due to bilibili use javascript to load all pic \n #so you cannot find that tag in the raw html code\n #urlretrieve(imgtag[\"src\"],userid+\"avater\")\n\n#getAvater(\"1950746\")\n\nhtml = urlopen(\"http://www.pythonscraping.com\")\nbsObj = BeautifulSoup(html, \"html.parser\")\nimageLocation = bsObj.find(\"a\", {\"id\": \"logo\"}).find(\"img\")[\"src\"]\nurlretrieve (imageLocation, \"H:\\SpiderPic\\logo.jpg\")" }, { "alpha_fraction": 0.7093778848648071, "alphanum_fraction": 0.7112349271774292, "avg_line_length": 36.17241287231445, "blob_id": "265905b7e92c26e6c9e92c62299cbf1537d28a10", "content_id": "b397f7e58c4e557517327b523f3518cd48707611", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1077, 
"license_type": "no_license", "max_line_length": 91, "num_lines": 29, "path": "/Web Scraping with Python/Chapter2/1-selectByClass/1-selectByClass/_1_selectByClass.py", "repo_name": "chenhongbiao/My_Web_Spider", "src_encoding": "UTF-8", "text": "from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nhtml = urlopen(\"http://www.pythonscraping.com/pages/warandpeace.html\")\n#print(html.read()) #print the raw html\n# strange... once use this statement, the print(bsObj) didn't work.\nbsObj = BeautifulSoup(html.read(),\"html.parser\")\n#print(bsObj) #print the tree structure html~\n\ncolorList = bsObj.findAll(\"span\", {\"class\":\"green\", \"class\":\"red\"})\n#the following function would return both the green and red span tags in the HTML document:\nfor content in colorList:\n print(content.get_text())\n#strange... it only occur the first one match, not the expected both*\n\nnameList = bsObj.findAll(\"span\",{\"class\":\"green\"})\nfor name in nameList:\n #print(\"name.get_next(): \", name.get_text())\n print(\"name: \", name) \n\nwordList = bsObj.findAll(\"span\",{\"class\":\"red\"})\nfor word in wordList:\n print(word.get_text())\n\nheList = bsObj.findAll(text = \"the prince\")\n#the text should all* in the tags <span class=\"green\">the prince</span>\nprint(len(heList))\n\n#allText = bsObj.findAll(id=\"text\")\n#print(allText[0].get_text())" }, { "alpha_fraction": 0.7072784900665283, "alphanum_fraction": 0.7104430198669434, "avg_line_length": 30.600000381469727, "blob_id": "c4aad7c4a905f1cb6835c4a33c6cf917443a41e5", "content_id": "9a41b0135191dc3b2be44ea6baadb34e19ee7c81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 632, "license_type": "no_license", "max_line_length": 104, "num_lines": 20, "path": "/Web Scraping with Python/Chapter6/3-readingCsv/3-readingCsv/_3_readingCsv.py", "repo_name": "chenhongbiao/My_Web_Spider", "src_encoding": "UTF-8", "text": "from urllib.request import urlopen\nfrom io import StringIO\nimport csv\n\ndata = urlopen(\"http://pythonscraping.com/files/MontyPythonAlbums.csv\").read().decode(\"ascii\")\ndataFile = StringIO(data) #make it look like a file stream so the csv library would like to deal with it\n#csvReader = csv.reader(dataFile)\n#print(csvReader)\n#print(\"-----------------------------\")\n#for row in csvReader:\n# print(row)\n# print(\"The ablum \"+row[0]+\"was realeased in \"+str(row[1]))\n\ndictReader = csv.DictReader(dataFile)\nprint(dictReader.fieldnames)\n#this take the first pair as the fieldname\nfor row in dictReader:\n print(row)\n\ndataFile.close() " }, { "alpha_fraction": 0.7561608552932739, "alphanum_fraction": 0.7600519061088562, "avg_line_length": 37.599998474121094, "blob_id": "05f663fd1d71418b35e2aa1db6d561cfc08f6c37", "content_id": "8277ae6f71f7c3312a5b0a4aa0df5c0f078a0de0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 771, "license_type": "no_license", "max_line_length": 90, "num_lines": 20, "path": "/Web Scraping with Python/Chapter6/6-readDocx/6-readDocx/_6_readDocx.py", "repo_name": "chenhongbiao/My_Web_Spider", "src_encoding": "UTF-8", "text": "from zipfile import ZipFile\nfrom urllib.request import urlopen\nfrom io import BytesIO\nfrom bs4 import BeautifulSoup\n\nwordFile = urlopen(\"http://pythonscraping.com/pages/AWordDocument.docx\").read()\nwordFile = BytesIO(wordFile) #turn it into Bytes stream so can be unzipp (open a zip file)\ndocument = ZipFile(wordFile)\n#xml_content = 
document.read('document.xml') \n#it seems that zip create a xml file name like below in the memory\n#you can see that in the debug windows\nxml_content = document.read('word/document.xml')\n #return file bytes as a string for name\nprint(xml_content.decode('utf-8'))\n\nwordObj = BeautifulSoup(xml_content.decode(\"utf-8\"),\"html.parser\")\ntextStrings = wordObj.findAll(\"w:t\")\nfor textElem in textStrings:\n print(textElem.text)\n #get_text()" }, { "alpha_fraction": 0.6751506924629211, "alphanum_fraction": 0.6784996390342712, "avg_line_length": 40.48611068725586, "blob_id": "7020eef492bc23b458d45ee1c67b18f6a54f8d16", "content_id": "17c7d580f1e5a2ec4e92e6968c8d2e4f941268b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2986, "license_type": "no_license", "max_line_length": 106, "num_lines": 72, "path": "/Web Scraping with Python/Chapter3/3-crawlSite.py/3-crawlSite.py/_3_crawlSite.py.py", "repo_name": "chenhongbiao/My_Web_Spider", "src_encoding": "UTF-8", "text": "from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport re\nimport random\nimport datetime\n\n#take a bsObj (the page you want to retrieve) and a includeUrl\n#Retrieves a Python list of all Internal links found on a page\n#def getInternalLinks(bsObj, includeUrl):\n #Find all links that begin with a \"/\"\n #^(/ | . * includeUrl) - begin with a \"/\" or some internalLink have a different pattern use includeUrl\n \n#Retrieves a Python list of all external links found on a page\n#def getExternalLinks(bsObj, excludeUrl):\n #Find all links that start with \"http\" or \"www\" that do\n #not contain the current URL\n #^(http|www) ( (?!excludeUrl). )*$\n #find url start with http or www but not find some excludeUrl that you set\n \n#str = \"www.chenhongbiao.github.com\"\n#str.split() - [\"www.chenhongbiao.github.com\"] - use space as split-base by default\n#str.split('.') - [\"www\", \"chenhongbiao\", \"github\", \"com\"] - use '.' 
as split-base\n#str.split('.', 1) - [\"www\", \"chenhongbiao.github.com\"] - split only once (one time)\n\npages = set()\nrandom.seed(datetime.datetime.now())\n\n#Retrieves a list of all Internal links found on a page\ndef getInternalLinks(bsObj, includeUrl):\n internalLinks = []\n #Finds all links that begin with a \"/\"\n for link in bsObj.findAll(\"a\", href=re.compile(\"^(/|.*\"+includeUrl+\")\")):\n if link.attrs['href'] is not None:\n if link.attrs['href'] not in internalLinks:\n internalLinks.append(link.attrs['href'])\n return internalLinks\n \n#Retrieves a list of all external links found on a page\ndef getExternalLinks(bsObj, excludeUrl):\n externalLinks = []\n #Finds all links that start with \"http\" or \"www\" that do\n #not contain the current URL\n for link in bsObj.findAll(\"a\", href=re.compile(\"^(http|www)((?!\"+excludeUrl+\").)*$\")):\n if link.attrs['href'] is not None:\n if link.attrs['href'] not in externalLinks:\n externalLinks.append(link.attrs['href'])\n return externalLinks\n\ndef splitAddress(address):\n addressParts = address.replace(\"http://\", \"\").split(\"/\")\n return addressParts\n\ndef getRandomExternalLink(startingPage):\n html = urlopen(startingPage)\n bsObj = BeautifulSoup(html,\"html.parser\")\n externalLinks = getExternalLinks(bsObj, splitAddress(startingPage)[0])\n if len(externalLinks) == 0:\n internalLinks = getInternalLinks(bsObj, splitAddress(startingPage)[0])\n if len(internalLinks) == 0:\n return \"\"\n else:\n return getRandomExternalLink(internalLinks[random.randint(0, len(internalLinks)-1)])\n else:\n return externalLinks[random.randint(0, len(externalLinks)-1)]\n \ndef followExternalOnly(startingSite):\n externalLink = getRandomExternalLink(\"http://chenhongbiao.github.io/en/\")\n if externalLink != \"\":\n print(\"Random external link is: \"+externalLink)\n followExternalOnly(externalLink)\n \nfollowExternalOnly(\"http://chenhongbiao.github.io/en/\")" }, { "alpha_fraction": 0.7245989441871643, "alphanum_fraction": 0.7286096215248108, "avg_line_length": 48.93333435058594, "blob_id": "1ec4655442b01ac0c82ccfd8a8f20a920b6dd39d", "content_id": "b037a74477b82479e2448badf10544b80530df94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 748, "license_type": "no_license", "max_line_length": 104, "num_lines": 15, "path": "/Web Scraping with Python/Chapter9/1.5-fileSubmission/1.5-fileSubmission/_1.5_fileSubmission.py", "repo_name": "chenhongbiao/My_Web_Spider", "src_encoding": "UTF-8", "text": "import requests\n\n#open() - open a flie and return a stream\nfiles = {\"uploadFile\": open(\"H:\\SpiderPic\\sci_hub-TE.jpg\",\"rb\")}\n#open(file, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True, opener=None)\n#It defaults to 'r' which means open for reading in text mode.\n#In text mode, if encoding is not specified the encoding used is platform dependent\n#The default mode is 'r' (open for reading text, synonym of 'rt'). \n\n#For binary read-write access, the mode 'w+b' opens and truncates the file to 0 bytes. \n#'r+b' opens the file without truncation. 
#'r'\topen for reading; 'b'\tbinary mode\n\nr = requests.post(\"http://pythonscraping.com/pages/processing2.php\",files=files)\n#<input type=\"file\" name=\"uploadFile\">\nprint(r.text)" }, { "alpha_fraction": 0.6703020334243774, "alphanum_fraction": 0.6778523325920105, "avg_line_length": 31.216217041015625, "blob_id": "e25ea628bc1672897a7a02d84b1c8d46a825b94c", "content_id": "f4e11ca4e8c2249a6659884b136856fa5598aa28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1192, "license_type": "no_license", "max_line_length": 73, "num_lines": 37, "path": "/Web Scraping with Python/Chapter5/2-createCsv/2-createCsv/_2_createCsv.py", "repo_name": "chenhongbiao/My_Web_Spider", "src_encoding": "UTF-8", "text": "#import csv\n#csvFile = open(\"test.csv\", 'w+')\n#try:\n #writer = csv.writer(csvFile)\n #writer.writerow(('number', 'number plus 2', 'number times 2'))\n #write a tuple as a row in the file\n #for i in range(10):\n #writer.writerow((i, i+2, i*2))\n#finally:\n #csvFile.close()\n #finally like except but it would execute regardless whether \n #the try statement have thrown an exception or not\nimport csv\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\nhtml = urlopen(\"http://en.wikipedia.org/wiki/Comparison_of_text_editors\")\nbsObj = BeautifulSoup(html,\"html.parser\")\n\n#The main comparison table is currently the first table on the page\ntable = bsObj.findAll(\"table\",{\"class\":\"wikitable\"})[0]\ncaption = table.find(\"caption\")\n#<caption>List of text editors</caption>\nrows = table.findAll(\"tr\")\n\ncsvFile = open(\"editors.csv\", 'wt', newline='', encoding='utf-8')\nwriter = csv.writer(csvFile)\ntry:\n\tfor row in rows:\n\t\tcsvRow = []\n\t\tfor cell in row.findAll(['td', 'th']):\n #find tag <td> or [th] under the cell tag\n\t\t\tcsvRow.append(cell.get_text())\n\t\twriter.writerow(csvRow)\n #write a Python list as a row into it\nfinally:\n csvFile.close()\n" }, { "alpha_fraction": 0.5814151763916016, "alphanum_fraction": 0.5848252177238464, "avg_line_length": 39.465518951416016, "blob_id": "cdb6f32f31f229604aa4ff74e3f08ca0f718f7c8", "content_id": "03f0c0a8b9981a9ab831a82d498e3a700c7ffe31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2346, "license_type": "no_license", "max_line_length": 111, "num_lines": 58, "path": "/Web Scraping with Python/Chapter5/4-mysqlBasicExample/4-mysqlBasicExample/_4_mysqlBasicExample.py", "repo_name": "chenhongbiao/My_Web_Spider", "src_encoding": "UTF-8", "text": "#import pymysql\n\n#conn = pymysql.connect(host='127.0.0.1', unix_socket='/tmp/mysql.sock', \n# user='root', passwd=\"faustmeow\", db='mysql')\n# Connect to the database\n#conn = pymysql.connect(host='localhost',\n# user='root',\n# password='faustmeow',\n# db='mysql')\n#cur = conn.cursor()\n#cur.execute(\"USE scraping\")\n#cur.execute(\"SELECT * FROM pages\")\n#print(cur.fetchone())\n#print(cur.fetchone())\n#cur.close()\n#conn.close()\n\n###########################################\nimport pymysql.cursors\n\n# Connect to the database\nconnection = pymysql.connect(host='localhost',\n user='root',\n password='faustmeow', #[email protected] can contact me~ \n db='mysql',\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n#with expression [as variable]: \n# with-block \n#It is designed to replaced \"try ... 
finally ...\"\n#\"with\" keyword like \"if/while/try/for\" followed \":\"\n\n#the __enter__ and __exit__ function is defined in the object of the expression\n\n#this statement works like this:\n#first, it would exe the function called \"__enter__\" and return an object\n#the object would be catched by the variable which is behind the \"as\"\n#then, it would exe the \"with-blcok \" \n#finally, no matter the expression exe is okay or not, it would exe \"__exit__\" function\ntry:\n with connection.cursor() as cursor:\n #so in this case, the connection.cursor() would create an cursor object and \n #use it do something and close it(cur.close() ) no matter of the things it do having exceptions or not \n sql = \"INSERT INTO `users` (`email`, `password`) VALUES (%s, %s)\"\n cursor.execute(sql, ('[email protected]', 'very-secret'))\n #so the email \"[email protected]\" and password \"very-secret\" insert into the database\n\n # connection is not autocommit by default. So you must commit to save your changes.\n connection.commit()\n\n with connection.cursor() as cursor:\n # Read a single record\n sql = \"SELECT `id`, `password` FROM `users` WHERE `email`=%s\"\n cursor.execute(sql, ('[email protected]',))\n result = cursor.fetchone()\n print(result)\nfinally:\n connection.close()" }, { "alpha_fraction": 0.6507378816604614, "alphanum_fraction": 0.6605762243270874, "avg_line_length": 29.29787254333496, "blob_id": "ec21e3919af72e1dd37230feae79959ee02140bc", "content_id": "9dbe4ac98385ade14ede476b7b32bc9de62b7618", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1423, "license_type": "no_license", "max_line_length": 83, "num_lines": 47, "path": "/Web Scraping with Python/Chapter5/1.5-getPageMedia/1.5-getPageMedia/_1.5_getPageMedia.py", "repo_name": "chenhongbiao/My_Web_Spider", "src_encoding": "UTF-8", "text": "import os\nfrom urllib.request import urlretrieve\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\ndownloadDirectory = \"H:\\SpiderPic\"\nbaseUrl = \"https://m.douban.com\"\n\ndef getAbsoluteURL(baseUrl, source):\n if source.startswith(\"http://www.\"):\n url = \"http://\"+source[11:]\n elif source.startswith(\"http://\") or source.startswith(\"https://\"):\n url = source\n elif source.startswith(\"www.\"):\n url = source[4:]\n url = \"http://\"+source\n else:\n url = baseUrl+\"/\"+source\n if url.endswith(\".jpg\"):\n return url\n else:\n return None\n #if baseUrl not in url:\n #return None\n\ndef getDownloadPath(baseUrl, absoluteUrl, downloadDirectory):\n path = absoluteUrl.replace(\"www.\", \"\")\n path = path.replace(baseUrl, \"\")\n path = downloadDirectory+path\n directory = os.path.dirname(path)\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n return path\n\nhtml = urlopen(\"https://m.douban.com/photos/album/1627562414/\")\nbsObj = BeautifulSoup(html,\"html.parser\")\ndownloadList = bsObj.findAll(\"img\")\nfilename=\"a\"\nfor download in downloadList:\n fileUrl = getAbsoluteURL(baseUrl, download[\"src\"])\n if fileUrl is not None:\n print(fileUrl)\n #urlretrieve(fileUrl, getDownloadPath(baseUrl, fileUrl, downloadDirectory))\n filename = filename+\"a\"\n urlretrieve(fileUrl,\"H:\\SpiderPic\\\\\"+filename+\".jpg\")" }, { "alpha_fraction": 0.6323613524436951, "alphanum_fraction": 0.6432247161865234, "avg_line_length": 50.47058868408203, "blob_id": "846c5c7b1f0933c3b7589c43da1021a1e557467f", "content_id": "3a2964a24994cf487d726125daad6a551b774fd1", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 1749, "license_type": "no_license", "max_line_length": 218, "num_lines": 34, "path": "/Web Scraping with Python/Chapter3/2-crawlWikipedia/2-crawlWikipedia/_2_crawlWikipedia.py", "repo_name": "chenhongbiao/My_Web_Spider", "src_encoding": "UTF-8", "text": "from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport re\n\npages = set()\ndef getLinks(pageUrl):\n global pages\n html = urlopen(\"http://en.wikipedia.org\"+pageUrl)\n bsObj = BeautifulSoup(html,\"html.parser\")\n try:\n print(bsObj.h1.get_text())\n print(bsObj.find(id =\"mw-content-text\").findAll(\"p\")[0].get_text())\n #! 'gbk' codec can't encode character '\\xa0' in position 464: illegal multibyte sequence\n #the page is encode by utf-8 and we decode it correctly but [the win8 terminal \"print\"] want to show you as gbk encode\n #some utf-8 decode 10 cannot transfer into gbk code, so throw the exception\n\n #so... Let's show it as utf-8? well, it's hard to do this in vs 2015, so I change my console as \"cmd\", run .py in cmd =-=\n print(bsObj.find(id=\"ca-edit\").find(\"span\").find(\"a\").attrs['href'])\n except AttributeError:\n print(\"This page is missing something! No worries though!\")\n\n for link in bsObj.findAll(\"a\", href=re.compile(\"^(/wiki/)\")):\n #Hey, there is page href that it didn't cover \n #<a href=\"//shop.wikimedia.org\" title=\"Visit the Wikipedia store\">Wikipedia store</a>\n #<a href=\"https://donate.wikimedia.org/wiki/Special:FundraiserRedirector?utm_source=donate&amp;utm_medium=sidebar&amp;utm_campaign=C13_en.wikipedia.org&amp;uselang=en\" title=\"Support us\">Donate to Wikipedia</a>\n #a good web scraper can be built in a good html website...\n if 'href' in link.attrs:\n if link.attrs['href'] not in pages:\n #We have encountered a new page\n newPage = link.attrs['href']\n print(newPage)\n pages.add(newPage)\n getLinks(newPage)\ngetLinks(\"\")" }, { "alpha_fraction": 0.7476635575294495, "alphanum_fraction": 0.7551401853561401, "avg_line_length": 34.66666793823242, "blob_id": "6cf8b3d2e1410ed907625399daf6e30a594d61b6", "content_id": "0d781ca4bf84660d25fbde93c1e03f5b7b3c2569", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 535, "license_type": "no_license", "max_line_length": 82, "num_lines": 15, "path": "/Web Scraping with Python/Chapter1/1-basicExample/1-basicExample/_1_basicExample.py", "repo_name": "chenhongbiao/My_Web_Spider", "src_encoding": "UTF-8", "text": "\nfrom urllib.request import urlopen\n# only import a function urlopen\n# if you use python 2.x you will use urllib2\n# in python 3.x, urllib2 was renamed as urllib and was spilt into three submodules\n# they are urllib.request , urllib.parse, and urllib.error\n\nurl = \"http://chenhongbiao.github.com\"\nhtml = urlopen(url)\nprint(html)\n#html is a http Resopnse object = -=\n# the function urlopen can open and ... 
read a remote object\nprint(html.read())\n\n#notice that Chinese would not show in the terminal in decode\n#it would show the raw code" }, { "alpha_fraction": 0.6836861968040466, "alphanum_fraction": 0.6836861968040466, "avg_line_length": 35.54545593261719, "blob_id": "f37c2cf88ab3dcd822b60f1a8c584369cbeb5077", "content_id": "0c86cecf5436045c671c8215474349ac8ce3f889", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 803, "license_type": "no_license", "max_line_length": 91, "num_lines": 22, "path": "/Web Scraping with Python/Chapter9/3-cookies/3-cookies/_3_cookies.py", "repo_name": "chenhongbiao/My_Web_Spider", "src_encoding": "UTF-8", "text": "import requests\n\nparams = {\"username\":\"anything\", \"password\":\"password\"}\nr = requests.post(\"http://pythonscraping.com/pages/cookies/welcome.php\", data=params)\nprint(r.headers)\nprint(\"Cookie is set to: \")\nprint(r.cookies.get_dict())\nprint(\"---------------------\")\nprint(\"Let's use cookie to go to profile page\")\nr = requests.get(\"http://pythonscraping.com/pages/cookies/profile.php\", cookies= r.cookies)\nprint(r.text)\n\n#session = requests.Session()\n#s = session.post(\"http://pythonscraping.com/pages/cookies/welcome.php\", data=params)\n\n#print(s.headers)\n#print(\"Cookie is set to: \")\n#print(s.cookies.get_dict())\n#print(\"---------------------\")\n#print(\"Let's use cookie to go to the profile page:\")\n#s = session.get(\"http://pythonscraping.com/pages/cookies/profile.php\",cookies=s.cookies)\n#print(s.text)" }, { "alpha_fraction": 0.6805194616317749, "alphanum_fraction": 0.6883116960525513, "avg_line_length": 40.621620178222656, "blob_id": "c88b3505ed1c6db1eaf22997bdf23bdbc5328de1", "content_id": "87892803690e0d887bf254575355ff1ff188a012", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1554, "license_type": "no_license", "max_line_length": 105, "num_lines": 37, "path": "/Web Scraping with Python/Chapter1/3-exceptionHandling/3-exceptionHandling/_3_exceptionHandling.py", "repo_name": "chenhongbiao/My_Web_Spider", "src_encoding": "UTF-8", "text": "from urllib.request import urlopen\nfrom urllib.error import HTTPError\nfrom bs4 import BeautifulSoup\n\ndef getTitle(url):\n try:\n html = urlopen(url)\n except HTTPError as e:\n #The page is not found on the server - 404 (or there was some error in retrieving it)\n #urlopen() would throw HTTPError\n #But If the server is not found at all - let say, mistype the url, urlopen() return a None object\n #so the html you got is a None object\n return None\n try:\n bsObj = BeautifulSoup(html.read(),\"html.parser\")\n title = bsObj.body.h1\n #if the tag you want is not existed, BeautifulSoup would return a None object\n #if you want to call something(like Attribute) above the None object, it will throw a Error\n #An AttributeError: 'xx' object has no attribute 'xx'\n #Always remember every tag may be not existed\n except AttributeError as e:\n return None\n return title\n\ntitle = getTitle('http://www.pythonscraping.com/pages/page1.html')\n#title = getTitle(\"http://www.chenhongbiao.github.com\")\nif title == None:\n print(\"Title could not be found\")\nelse:\n print(title)\n\n#Hei, guy. I encounter an error here\n#if I connect the \"God\" wireless, it would occur the error \n#用户代码未处理 urllib.error.URLError Message: <urlopen error [Errno 11001] getaddrinfo failed>\n#but befor 1 hour ago, it work well. Now, it crash! 
=- =\n#but if I change the wireless \"HP\", it would be fine. Why? En...\n#At least I know there is nothing wrong in my code and my machine. :) " }, { "alpha_fraction": 0.7108433842658997, "alphanum_fraction": 0.7108433842658997, "avg_line_length": 43.266666412353516, "blob_id": "f0d7a8f4d3e314d55c55661fc31dbdd34a5c719d", "content_id": "f42e8f899a7bae033a0e765fd82ce3d8bdf72ca7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 664, "license_type": "no_license", "max_line_length": 97, "num_lines": 15, "path": "/Web Scraping with Python/Chapter9/1-simpleForm/1-simpleForm/_1_simpleForm.py", "repo_name": "chenhongbiao/My_Web_Spider", "src_encoding": "UTF-8", "text": "import requests\nparams = {\"firstname\":\"cat\", \"lastname\":\"faust\"} \n#r = requests.post(\"http://pythonscraping.com/pages/files/form.html\",data=params)\nr = requests.post(\"http://pythonscraping.com/files/processing.php\", data=params)\nprint(r)\nprint(r.text) #yeah, requests.post() response, using .text\n\n#params = {'email_addr': '[email protected]'}\n#r = requests.post(\"http://post.oreilly.com/client/o/oreilly/forms/quicksignup.cgi\", data=params)\n#print(r.text)\n\n#params = {\"email_addr\":\"[email protected]\"}\n#r = requests.post(\"https://conduit.ipost.com/forms.cgi\", data = params)\n#print(r.text)\n#<form action=\"https://conduit.ipost.com/forms.cgi\" method=\"post\" id=\"newsletter-subscribe\">\n" }, { "alpha_fraction": 0.6757493019104004, "alphanum_fraction": 0.6771117448806763, "avg_line_length": 47.93333435058594, "blob_id": "29c12ed76cd4169a1deeb15551085757ddd327cb", "content_id": "5eca97660038d0913c3fd8870bb3f4fe3a5837a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 734, "license_type": "no_license", "max_line_length": 105, "num_lines": 15, "path": "/Web Scraping with Python/Chapter3/1-getWikiLinks/1-getWikiLinks/_1_getWikiLinks.py", "repo_name": "chenhongbiao/My_Web_Spider", "src_encoding": "UTF-8", "text": "from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport re\nhtml = urlopen(\"https://en.wikipedia.org/wiki/Kevin_Bacon\")\n#html = urlopen(\"https://chenhongbiao.github.io/\")\nbsObj = BeautifulSoup(html.read(),\"html.parser\")\n#links = bsObj.findAll(\"a\") #findAll() return a Python list of tags object \n#for link in links:\n# if \"href\" in link.attrs: #link is a tag object which attrs is a Python dict object\n# print(link.attrs[\"href\"]) #href - Hypertext REFerence\n\nfor link in bsObj.find(\"div\", {\"id\":\"bodyContent\"}).findAll(\"a\", href=re.compile(\"^(/wiki/)((?!:).)*$\")):\n if 'href' in link.attrs:\n print(link.attrs['href'])\n#((?!:).)* meaning that can be any character with any long but not ':' show in it\n" }, { "alpha_fraction": 0.6948356628417969, "alphanum_fraction": 0.7230046987533569, "avg_line_length": 34.5, "blob_id": "1207a9427a955bab8fa2ada6c5efa0b667ff7376", "content_id": "036f80453d0016fe21bcef69ed0c98443b8c34f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 432, "license_type": "no_license", "max_line_length": 84, "num_lines": 12, "path": "/Web Scraping with Python/Chapter6/2-getUtf8Text.py", "repo_name": "chenhongbiao/My_Web_Spider", "src_encoding": "UTF-8", "text": "from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\nhtml = urlopen(\"https://chenhongbiao.github.io/cn/2015/03/64bit-Assembly-Debug/\")\nbsObj = BeautifulSoup(html,\"html.parser\")\ncontent = 
bsObj.find(\"article\", {\"class\":\"content\"}).find(\"h3\", id=\"为什么\").get_text()\n#<article class=\"content\">\n#content = bytes(content, \"UTF-8\")\n#content = content.decode(\"UTF-8\")\nprint(content)\n#print(bsObj)\n#print(bsObj.get_text())\n" }, { "alpha_fraction": 0.602034330368042, "alphanum_fraction": 0.607120156288147, "avg_line_length": 36.023529052734375, "blob_id": "81a47a1841e0e2952ad7e6346104c94daa809d92", "content_id": "60df131e7c3456c78c9453432e11a4dc42048cfe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3156, "license_type": "no_license", "max_line_length": 113, "num_lines": 85, "path": "/Web Scraping with Python/Chapter5/6-DegreesCrawlWiki/6-DegreesCrawlWiki/_6_DegreesCrawlWiki.py", "repo_name": "chenhongbiao/My_Web_Spider", "src_encoding": "WINDOWS-1258", "text": "from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport re\nimport pymysql\n\n#CREATE TABLE pages (id INT NOT NULL AUTO_INCREMENT,\n #url VARCHAR(255) NOT NULL,\n #created TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,\n #PRIMARY KEY (id));\n\n#CREATE TABLE links (id INT NOT NULL AUTO_INCREMENT,\n #fromPageId INT NULL,\n #toPageId INT NULL,\n #created TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,\n #PRIMARY KEY (id));\n\n#\n#identify a link by saying, ¡°There exists a link on page A, which connects to page B.\n#That is, INSERT INTO links (fromPageId, toPageId) VALUES (A, B); (where ¡°A¡± and\n#¡°B¡± are the unique IDs for the two pages)\n#\n\n# Connect to the database\nconn = pymysql.connect(host='localhost',\n user='root',\n password='faustmeow',\n db='mysql',\n charset='utf8mb4')\n\ncur = conn.cursor()\ncur.execute(\"USE scraping\")\n\ndef pageScraped(url):\n cur.execute(\"SELECT * FROM pages WHERE url = %s\", (url))\n if cur.rowcount == 0:\n return False\n page = cur.fetchone()\n #print(page) - page as a Python dict object?\n #print(page[0]) - \n cur.execute(\"SELECT * FROM links WHERE fromPageId = %s\", (int(page[0])))\n #False - We have encountered a new page, add it and search it for links\n #this url didn't use as a fromPage\n if cur.rowcount == 0:\n return False\n return True\n\ndef insertPageIfNotExists(url):\n cur.execute(\"SELECT * FROM pages WHERE url = %s\", (url))\n if cur.rowcount == 0:\n cur.execute(\"INSERT INTO pages (url) VALUES (%s)\", (url))\n conn.commit()\n return cur.lastrowid\n else:\n return cur.fetchone()[0]\n\ndef insertLink(fromPageId, toPageId):\n cur.execute(\"SELECT * FROM links WHERE fromPageId = %s AND toPageId = %s\", (int(fromPageId), int(toPageId)) )\n if cur.rowcount == 0 :\n cur.execute(\"INSERT INTO links (fromPageId, toPageId) VALUES (%s, %s)\", (int(fromPageId), int(toPageId)))\n conn.commit()\n\ndef getLinks(pageUrl, recursionLevel):\n global pages\n if recursionLevel > 4:\n return\n #hit the six point, return\n #get the pageId and if it's new page, insert it and return its id\n pageId = insertPageIfNotExists(pageUrl)\n html = urlopen(\"http://en.wikipedia.org\"+pageUrl)\n bsObj = BeautifulSoup(html,\"html.parser\")\n #for all internal article links of this page, \n for link in bsObj.findAll(\"a\", href=re.compile(\"^(/wiki/)((?!:).)*$\")):\n insertLink(pageId, insertPageIfNotExists(link.attrs['href']))\n #build the link relationship in links table\n if not pageScraped(link.attrs['href']):\n #False - We have encountered a new page, add it and search it for links\n newPage = link.attrs['href']\n print(newPage)\n getLinks(newPage, recursionLevel+1)\n else: \n print(\"Skipping: 
\"+str(link.attrs['href'])+\" found on \"+pageUrl)\n\ngetLinks(\"/wiki/Kevin_Bacon\", 0) \ncur.close()\nconn.close()" }, { "alpha_fraction": 0.686956524848938, "alphanum_fraction": 0.7565217614173889, "avg_line_length": 27.75, "blob_id": "1118305509edcf095e9a2c6f54f8f705f8871ec6", "content_id": "391f64d252d7eff5d20ee4bbebdfb86c619db3d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 115, "license_type": "no_license", "max_line_length": 66, "num_lines": 4, "path": "/README.md", "repo_name": "chenhongbiao/My_Web_Spider", "src_encoding": "UTF-8", "text": "The purpose of this project is to create my web spider in 10 days.\n\nLanguage: Python 3.5;\nIDE: Visual Studio 2015;\n" }, { "alpha_fraction": 0.713214635848999, "alphanum_fraction": 0.7216494679450989, "avg_line_length": 41.63999938964844, "blob_id": "86bd8b1ec936bfed97fe8e6befce3a20ee2c0da2", "content_id": "3860cc47cc4214ab10c03c806bf501063a433baf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1067, "license_type": "no_license", "max_line_length": 91, "num_lines": 25, "path": "/Web Scraping with Python/Chapter1/2-beautifulSoup/2-beautifulSoup/_2_beautifulSoup.py", "repo_name": "chenhongbiao/My_Web_Spider", "src_encoding": "UTF-8", "text": "from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\nhtml = urlopen('http://www.pythonscraping.com/pages/warandpeace.html')\n#print(html.read()) # get the whole HTML content of this page\nprint(html.read())\nprint(\"-------------------------------------------------\")\nbsObj = BeautifulSoup(html.read(),\"html.parser\")\nprint(bsObj)\n#UserWarning: No parser was explicitly specified, so I'm using the best\n#available HTML parser for this system (\"html.parser\"). This usually isn't a\n#problem, but if you run this code on another system, or in a different\n#virtual environment, it may use a different parser and behave differently.\n\n#To get rid of this warning, change this:\n#BeautifulSoup([your markup])\n#to this:\n#BeautifulSoup([your markup], \"html.parser\")\nprint(\"Let try to print its h1 if it's existed\")\nprint(bsObj.h1)\n#print(bsObj.html.h1) #something it work but something it say it don't have this attributes\n#print(bsObj.body.h1)\n#print(bsObj.html.body.h1)\n#notice that h1 is in the two deep layer (html->body->h1)\n#and above ways are okay to access h1 \n" }, { "alpha_fraction": 0.659547746181488, "alphanum_fraction": 0.6633166074752808, "avg_line_length": 43.22222137451172, "blob_id": "a75437720560d17c1a7ebe151ac4ddea6290be5a", "content_id": "ccde478ce01bdb44a18c54da193053a365e47092", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 796, "license_type": "no_license", "max_line_length": 104, "num_lines": 18, "path": "/Web Scraping with Python/Chapter2/3-findDescendants/3-findDescendants/_3_findDescendants.py", "repo_name": "chenhongbiao/My_Web_Spider", "src_encoding": "UTF-8", "text": "from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nhtml = urlopen(\"http://www.pythonscraping.com/pages/page3.html\")\nbsObj = BeautifulSoup(html.read(),\"html.parser\")\n\nfor sibling in bsObj.find(\"table\",{\"id\":\"giftList\"}).tr.next_siblings:\n print(sibling)\n#get rid of the first row of the table - the title of the table \n#.tr.next_siblings\n\n#yeah. 
we also have .next_sibling and .previous_sibling which are only return one tag* instead of a list\nprint(\"------------------------------------------------------------------\")\n\nprint(bsObj.find(\"img\",{\"src\":\"../img/gifts/img1.jpg\"}).parent.previous_sibling.get_text())\n#yeah, we also have .parents (so you can say hello to your father's father's father...)\n\nfor child in bsObj.find(\"table\",{\"id\":\"giftList\"}).children:\n print(child)\n" }, { "alpha_fraction": 0.6616161465644836, "alphanum_fraction": 0.680134654045105, "avg_line_length": 28, "blob_id": "5d5a1d894876a335941bcb6578aba9a5ec67d7d9", "content_id": "13958bc2d118eaa593e715a69fe04622bab21c29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1584, "license_type": "no_license", "max_line_length": 83, "num_lines": 41, "path": "/Web Scraping with Python/Chapter5/7-sendEmail/7-sendEmail/_7_sendEmail.py", "repo_name": "chenhongbiao/My_Web_Spider", "src_encoding": "UTF-8", "text": "#import smtplib\n#from email.mime.text import MIMEText\n\n#msg = MIMEText(\"The body of the mail is here\")\n#msg['Subject'] = \"An Email Alert\"\n#msg[\"From\"] = \"[email protected]\"\n#msg[\"To\"] = \"[email protected]\"\n\n#s = smtplib.SMTP(\"localhost\")\n#s.send_message(msg)\n#s.quit()\n\n#163.com - email server\n#servername serverIP SSL port non-SSL port\n#SMTP smtp.163.com 465/994 25\n#\n#coding:utf-8\nimport smtplib \nfrom email.mime.text import MIMEText # 引入smtplib和MIMEText\n\nhost = 'smtp.163.com' # 设置发件服务器地址\nport = 25 # 设置发件服务器端口号。注意,这里有SSL和非SSL两种形式\nsender = '[email protected]' # 设置发件邮箱,一定要自己注册的邮箱\npwd = 'xxxxxxx' # 设置发件邮箱的密码,等会登陆会用到\nreceiver = '[email protected]' # 设置邮件接收人,这里是我的公司邮箱\nbody = '<h1>What your name</h1><p>Hello Meow! How are you?</p>' # 设置邮件正文,这里是支持HTML的\n#body = 'Hello Meow!'\n\nmsg = MIMEText(body, 'html') # 设置正文为符合邮件格式的HTML内容\nmsg['subject'] = 'Hello world' # 设置邮件标题\nmsg['from'] = sender # 设置发送人\nmsg['to'] = receiver # 设置接收人\n\ns = smtplib.SMTP(host, port) # 注意!如果是使用SSL端口,这里就要改为SMTP_SSL\ns.login(sender, pwd) # 登陆邮箱\ns.sendmail(sender, receiver, msg.as_string()) # 发送邮件!\n\nprint (\"successful!\") # 发送成功就会提示\n\n#因为没有附件,所以代码部分很简单,如果带了附件,推荐下Envelope,\n#描述里说是Mailing for human beings(模仿requests)" } ]
24
rjfarmer/multimesa
https://github.com/rjfarmer/multimesa
953f4098978a279d80fe8e217b1970dd2a4fa5f1
e9c9739acdb2200ff729d4ebea3405da2a564de0
308434746a6b49b6735cdaea02fc7cdccfa0ea68
refs/heads/master
2021-03-12T22:42:31.107445
2018-11-04T18:10:53
2018-11-04T18:10:53
30,721,088
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.790123462677002, "alphanum_fraction": 0.790123462677002, "avg_line_length": 31.399999618530273, "blob_id": "e07fe6b917bb69e7cefb88bc4942e3b933c93d2e", "content_id": "5ba6fc90afb2cf495fbc45986c01da9564b42d23", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 162, "license_type": "permissive", "max_line_length": 79, "num_lines": 5, "path": "/README.md", "repo_name": "rjfarmer/multimesa", "src_encoding": "UTF-8", "text": "# multimesa\nScript to make it easy to generate large number of folders with MESA inlists in\n\nSee examples for how to run:\npython /path/to/multimesa.py example.py\n" }, { "alpha_fraction": 0.7008426785469055, "alphanum_fraction": 0.7528089880943298, "avg_line_length": 23.55172348022461, "blob_id": "d9477eeb72145fd4e91cf5f67a449fa4a3fa5411", "content_id": "076d2845053b32efe69ba16473ca6ca15b98f056", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 712, "license_type": "permissive", "max_line_length": 93, "num_lines": 29, "path": "/example2.py", "repo_name": "rjfarmer/multimesa", "src_encoding": "UTF-8", "text": "#An example input file for multimesa.py\n\n#run command is python multimesa.py example2.py\n\n#Location to write out folders too\noutput_folder=\"output/\"\n\n# if set to 1 we number the folders otherwise use the _name value\nfolder_num=1\n\n\n#Defaults assume varaibles are linearly looped and that the name of varaible is the mesa one.\n#Assumes varaibles exist inside control_inlist unless specified as star \nmass_list=[7.0,8.0,9.0]\nmass_name=\"initial_mass\"\nmass_section=\"control\"\n\nvar_list=[0.00001,0.0001,0.001]\nvar_name=\"varcontrol_target\"\nvar_section=\"control\"\n\nmesh_list=[0.1,0.5,1.0]\nmesh_name=\"mesh_delta_coeff\"\nmesh_section=\"control\"\n\n\nrot_list=[0.0,0.25,0.5]\nrot_name=\"new_omega_div_omega_crit\"\nrot_section=\"star\"\n" }, { "alpha_fraction": 0.6697006225585938, "alphanum_fraction": 0.6780838370323181, "avg_line_length": 25.056249618530273, "blob_id": "f7d2ae5a8b952ebd77444400cafb22073faafca5", "content_id": "312bb502b0c02092194cbf22ea0e3841e4d309b5", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4175, "license_type": "permissive", "max_line_length": 73, "num_lines": 160, "path": "/multimesa.py", "repo_name": "rjfarmer/multimesa", "src_encoding": "UTF-8", "text": "#Copyright (c) 2015, Robert Farmer [email protected]\n\n#Permission to use, copy, modify, and/or distribute this software for any\n#purpose with or without fee is hereby granted, provided that the above\n#copyright notice and this permission notice appear in all copies.\n\n#THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n#WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n#MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n#ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n#WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n#ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n#OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n\n#This is desgined to read in a file that specifies a series of options\n#These options are then iterated over to create a series of mesa inlists\n#in different folders.\n#Thus creating an easy way to create lots of mesa models\nfrom __future__ import print_function\nimport numpy as np\nimport getopt\nimport sys as sys\nimport itertools\nimport shutil as s\nimport os, errno\n\ndef mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else: raise\n\nopts, extra = getopt.getopt(sys.argv[1:],'')\n\n#Input varaibles\ninput_file=extra[0]\n\ntry:\n\te=__import__(input_file.replace(\".py\",''))\nexcept IOError:\n\tprint(\"File doesnt exist \",input_file)\n\tsys.exit(1)\nexcept SyntaxError:\n\traise\n\ntry:\n\toutput_folder=e.output_folder\nexcept AttributeError:\n\tprint(\"No output_folder set\")\n\tsys.exit(1)\t\n\ntry:\n\tinlist_name=e.inlist_name\nexcept AttributeError:\n\tinlist_name='inlist_cluster'\n\n#Access the different avraibles we want to loop over\nmesa_name=[]\nmesa_loop=[]\nmesa_sec=[]\nshort_name=[]\nfor i in e.__dict__:\n\tif i[-5:] == \"_name\":\n\t\tshort_name.append(i[:-5])\n\t\tmesa_name.append(getattr(e,short_name[-1]+\"_name\"))\n\t\ttry:\n\t\t\tminX=getattr(e,short_name[-1]+\"_min\")\n\t\t\tmaxX=getattr(e,short_name[-1]+\"_max\")\n\t\t\tuseRng=True\n\t\t\ttry:\n\t\t\t\trngX=getattr(e,short_name[-1]+\"_range\")\n\t\t\texcept AttributeError:\n\t\t\t\tuseRng=False\n\t\t\t\trngX=1\n\t\t\ttry:\n\t\t\t\tspaceX=getattr(e,short_name[-1]+\"_step\")\n\t\t\texcept AttributeError:\n\t\t\t\tspaceX=1\n\t\t\ttry:\n\t\t\t\ttyp=getattr(e,short_name[-1]+\"_type\")\n\t\t\texcept AttributeError:\n\t\t\t\ttyp='linear'\t\n\t\t\tif typ=='log':\n\t\t\t\tminX=np.log10(minX)\n\t\t\t\tmaxX=np.log10(maxX)\n\t\t\telif typ=='linear':\n\t\t\t\tminX=minX\n\t\t\t\tmaxX=maxX\n\t\t\telse:\n\t\t\t\tprint(\"Invalid loop type either linear or log or set name_list\",typ)\n\t\t\t\tsys.exit(1)\n\t\t\t\n\t\t\tif useRng:\n\t\t\t\tmesa_loop.append(np.linspace(minX,maxX,rngX))\n\t\t\telse:\n\t\t\t\tmesa_loop.append(np.arange(minX,maxX,spaceX))\n\t\texcept AttributeError:\n\t\t\ttry:\n\t\t\t\tmesa_loop.append(getattr(e,short_name[-1]+\"_list\"))\n\t\t\texcept AttributeError:\n\t\t\t\ttry:\n\t\t\t\t\tmesa_loop.append([getattr(e,short_name[-1]+\"_value\")])\n\t\t\t\texcept:\n\t\t\t\t\tprint(\"Bad name\", short_name[-1])\n\t\t\t\t\tsys.exit(0)\n\t\ttry:\n\t\t\tmesa_sec.append(getattr(e,short_name[-1]+\"_section\"))\n\t\texcept AttributeError:\n\t\t\tmesa_sec.append(\"control\")\n\n#Now to make the inlists\nk=1\nfor l in itertools.product(*mesa_loop):\n\tif e.folder_num:\n\t\tname=str(k)\n\telse:\n\t\tname=[]\n\t\t#Dont include range=1 varaibles\n\t\tfor i in range(len(mesa_name)):\n\t\t\tif len(mesa_loop[i])>1:\n\t\t\t\tname.append(mesa_name[i])\n\t\t\t\tname.append(\"_\")\n\t\t\t\tname.append(l[i])\n\t\t\t\tname.append(\"_\")\n\t\tname[-1]=''\n\t\tname=''.join(map(str, name))\n\t\n\toutF=os.path.join(output_folder,name)\n\tmkdir_p(outF)\n\t\n\t#get possible extra values\n\textra=[]\n\ttry:\n\t\textraName,extraVal,extraSec=e.callback(short_name,l)\n\texcept 
AttributeError:\n\t\textraName=[]\n\t\textraVal=[]\n\t\textraSec=[]\n\t\tpass\n\t\n\toutName=mesa_name+extraName\n\toutVal=list(l)+extraVal\n\toutSec=mesa_sec+extraSec\n\t\n\twith open(os.path.join(outF,inlist_name),'w') as f:\n\t\tf.write(\"&star_job\\n\")\n\t\tfor i in range(len(outName)):\n\t\t\tif outSec[i]==\"star\":\t\t\n\t\t\t\tf.write(outName[i]+\" = \"+str(outVal[i])+\"\\n\")\n\t\tf.write(\"/ ! end of star_job namelist\\n\")\n\t\tf.write(\"&controls\\n\")\n\t\tfor i in range(len(outName)):\n\t\t\tif outSec[i]==\"control\":\t\t\n\t\t\t\tf.write(outName[i]+\" = \"+str(outVal[i])+\"\\n\")\n\t\tf.write(\"/ ! end of control_job namelist\\n\")\n\tprint(k)\n\tk=k+1\n\t\t\t\t\t\n" }, { "alpha_fraction": 0.7333333492279053, "alphanum_fraction": 0.7616666555404663, "avg_line_length": 22.959999084472656, "blob_id": "e6e673f4d13fd8bb1c715d27246c380ce084344c", "content_id": "4cef1e458bd9152291264553dc383882f127db81", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 600, "license_type": "permissive", "max_line_length": 93, "num_lines": 25, "path": "/example.py", "repo_name": "rjfarmer/multimesa", "src_encoding": "UTF-8", "text": "#An example input file for multimesa.py\n\n#run command is python multimesa.py example.py\n\n#Location to write out folders too\noutput_folder=\"output/\"\n\n#Name to call inlist that get written if not set defaults to inlist_cluster\ninlist_name='inlist_cluster'\n\nfolder_num=1\n# if set to 1 we number the folders otherwise use the var names\n\n\n#Defaults assume varaibles are linearly looped and that the name of varaible is the mesa one.\nmass_min=1.0\nmass_max=10.0\nmass_range=3\nmass_name=\"initial_mass\"\nmass_type=\"linear\"\nmass_section='control'\n\nz_list=[0.01,0.02,0.03]\nz_name=\"initial_z\"\nz_section='control'\n\n" }, { "alpha_fraction": 0.6906226277351379, "alphanum_fraction": 0.71944659948349, "avg_line_length": 26.10416603088379, "blob_id": "bd18cced748796b9079537b2206a096014f396a0", "content_id": "aebaefb250f26121bdff7e70d44b4b3ee5b17345", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2602, "license_type": "permissive", "max_line_length": 93, "num_lines": 96, "path": "/example3.py", "repo_name": "rjfarmer/multimesa", "src_encoding": "UTF-8", "text": "#An example input file for multimesa.py\n\n#run command is python multimesa.py example3.py\n\n#Location to write out folders too\noutput_folder=\"output/\"\n\n# if set to 1 we number the folders otherwise use the _name value\nfolder_num=1\n\n\n#Defaults assume varaibles are linearly looped and that the name of varaible is the mesa one.\n#Assumes varaibles exist inside control_inlist unless specified as star \nsemi_list=[0.0,0.001,0.01,0.1]\nsemi_name=\"alpha_semiconvection\"\nsemi_section=\"control\"\n\nover_list=[0.0,0.001,0.016,0.2]\nover_name='overshoot_f_above_nonburn'\nover_section='control'\n\nthermo_list=[0.0,0.1,1.0,10.0]\nthermo_name=\"thermo_haline_coeff\"\nthermo_section='control'\n\nam_list=[0.0,0.5,1.0,1.5]\nam_name=\"am_nu_factor\"\nam_section=\"control\"\n\n\n\n\n#This function is called once per iteration with the current set of parameters\n#This then lets us set other parameters which may be dependant on the inputs \n#For instance lets say we have mass=8,9,10 and parameter y=1,2,3\n#we could say when mass<9 set z=0.01 when mass>=9 set z=0.02 unless y <2 in which case z=0.0\n\n#It should return 3 lists, where the lists are the mesa_name,value and section\n#Note 
if you have set folder_num=0 this will not add these names to the output folder path\n\n#If you dont care about this stuff just comment out the function, it doesn't need to exist\n\n#Note names are the short name ie for mass_name='initial_mass', names='mass' not initial_mass\ndef callback(names,val):\n\toutName=[]\n\toutVal=[]\n\toutSec=[]\n\t\n\tsemi=0\n\tover=0\n\tthermo=0\n\tam=0\n\t#Loops over both lists at the same time \n\tfor i,j in zip(names,val):\n\t\tif i=='semi':\n\t\t\tsemi=float(j)\n\t\tif i=='over':\n\t\t\tover=float(j)\n\t\tif i=='thermo':\n\t\t\tthermo=float(j)\n\t\tif i=='am':\n\t\t\tam=float(j)\n\n\tif semi >0.0:\n\t\toutName.append('allow_semiconvective_mixing')\n\t\toutVal.append('.true.')\n\t\toutSec.append('control')\n\telse:\n\t\toutName.append('allow_semiconvective_mixing')\n\t\toutVal.append('.false.')\n\t\toutSec.append('control')\n\t\t\n\tif over >0.0:\n\t\toutName.append('overshoot_f_below_nonburn')\n\t\toutName.append('overshoot_f_above_burn_h')\n\t\toutName.append('overshoot_f_below_burn_h')\n\t\toutName.append('overshoot_f_above_burn_he')\n\t\toutName.append('overshoot_f_below_burn_he')\n\t\toutName.append('overshoot_f_above_burn_z')\n\t\toutName.append('overshoot_f_below_burn_z')\n\t\ta=[over]*7\n\t\toutVal=outVal+a\n\t\ta=['control']*7\n\t\toutSec=outSec+a\n\n\tif thermo >0.0:\n\t\toutName.append('allow_thermohaline_mixing')\n\t\toutVal.append('.true.')\n\t\toutSec.append('control')\n\telse:\n\t\toutName.append('allow_thermohaline_mixing')\n\t\toutVal.append('.false.')\n\t\toutSec.append('control')\n\t\t\n\t\t\n\treturn outName,outVal,outSec\n" } ]
5
nairobi222/indy-catalyst
https://github.com/nairobi222/indy-catalyst
b867355a48a817c4759fc627ee093633c45b5e45
dcbd80524ace7747ecfecd716ff932e9b571d69a
00a96c2f10efab746bebc818dbcc50e7a4819d4c
refs/heads/master
2020-06-09T19:41:33.598166
2019-06-13T23:59:40
2019-06-13T23:59:40
193,495,135
0
0
Apache-2.0
2019-06-24T11:47:47
2019-06-24T11:47:43
2019-06-22T23:37:59
null
[ { "alpha_fraction": 0.6403161883354187, "alphanum_fraction": 0.6758893132209778, "avg_line_length": 28.764705657958984, "blob_id": "7878071f56fa06f733d3c4a398ac0bdec2123721", "content_id": "14d89b6beeb376f92e4b329b0be8679796abfe44", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1012, "license_type": "permissive", "max_line_length": 160, "num_lines": 34, "path": "/agent/scripts/run_demo", "repo_name": "nairobi222/indy-catalyst", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nshopt -s nocasematch\n\ncd $(dirname $0)\n\nAGENT=$1\nif [[ $AGENT == \"faber\" ]] || [[ $AGENT == \"alice\" ]]; then\n\techo \"Preparing agent image...\"\n\tdocker build -q -t faber-alice-demo -f ../docker/Dockerfile.demo .. || exit 1\nelse\n echo \"Please specify which agent you want to run. Choose from 'faber' or 'alice'.\";\n\texit 1;\nfi\n\necho \"Starting $AGENT...\"\nif [[ $AGENT == \"faber\" ]]; then\n\tAGENT_FILE=\"demo/faber-pg.py\"\n\tAGENT_PORT=8020\n\tAGENT_PORT_RANGE=8020-8027\nelse\n\tAGENT_FILE=\"demo/alice-pg.py\"\n\tAGENT_PORT=8030\n\tAGENT_PORT_RANGE=8030-8037\nfi\n\nDOCKER_ENV=\"-e LOG_LEVEL=${LOG_LEVEL}\"\n\n# on Windows, docker run needs to be prefixed by winpty\nif [[ \"$OSTYPE\" == \"msys\" ]]; then\n winpty docker run --name $AGENT --rm -e RUNMODE=docker -p 0.0.0.0:$AGENT_PORT_RANGE:$AGENT_PORT_RANGE $DOCKER_ENV -it faber-alice-demo $AGENT_FILE $AGENT_PORT\nelse\n docker run --name $AGENT --rm -e RUNMODE=docker -p 0.0.0.0:$AGENT_PORT_RANGE:$AGENT_PORT_RANGE $DOCKER_ENV -it faber-alice-demo $AGENT_FILE $AGENT_PORT\nfi\n" }, { "alpha_fraction": 0.6605374813079834, "alphanum_fraction": 0.6619518995285034, "avg_line_length": 36.21052551269531, "blob_id": "abcac2b077f440bd7ff48e5cedaf08b96c74d3d5", "content_id": "d407050f0c08eaccbf54f7d47a806ac0097342e3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2121, "license_type": "permissive", "max_line_length": 88, "num_lines": 57, "path": "/agent/indy_catalyst_agent/messaging/credentials/handlers/credential_request_handler.py", "repo_name": "nairobi222/indy-catalyst", "src_encoding": "UTF-8", "text": "\"\"\"Credential request handler.\"\"\"\n\nfrom ...base_handler import BaseHandler, BaseResponder, HandlerException, RequestContext\n\nfrom ..manager import CredentialManager\nfrom ..messages.credential_request import CredentialRequest\nfrom ....cache.base import BaseCache\n\n\nclass CredentialRequestHandler(BaseHandler):\n \"\"\"Message handler class for credential requests.\"\"\"\n\n async def handle(self, context: RequestContext, responder: BaseResponder):\n \"\"\"\n Message handler logic for credential requests.\n\n Args:\n context: request context\n responder: responder callback\n \"\"\"\n self._logger.debug(f\"CredentialRequestHandler called with context {context}\")\n\n assert isinstance(context.message, CredentialRequest)\n\n self._logger.info(\n \"Received credential request: %s\", context.message.serialize(as_string=True)\n )\n\n if not context.connection_active:\n raise HandlerException(\"No connection established for credential request\")\n\n credential_manager = CredentialManager(context)\n credential_exchange_record = await credential_manager.receive_request(\n context.message\n )\n\n # We cache some stuff in order to re-issue again in the future\n # without this roundtrip. 
It is used in credentials/manager.py\n cache: BaseCache = await context.inject(BaseCache)\n await cache.set(\n \"credential_exchange::\"\n + f\"{credential_exchange_record.credential_definition_id}::\"\n + f\"{credential_exchange_record.connection_id}\",\n credential_exchange_record.credential_exchange_id,\n 600,\n )\n\n # If auto_issue is enabled, respond immediately\n if credential_exchange_record.auto_issue:\n (\n credential_exchange_record,\n credential_issue_message,\n ) = await credential_manager.issue_credential(\n credential_exchange_record, credential_exchange_record.credential_values\n )\n\n await responder.send_reply(credential_issue_message)\n" }, { "alpha_fraction": 0.587527871131897, "alphanum_fraction": 0.5880237817764282, "avg_line_length": 30.14285659790039, "blob_id": "f8a2b47e2a04424b6d34b54e785fcdc47c4ef35f", "content_id": "0786f5f9285cd4d463f3ee2bdd3c3fcd0f30454f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8066, "license_type": "permissive", "max_line_length": 88, "num_lines": 259, "path": "/agent/indy_catalyst_agent/ledger/indy.py", "repo_name": "nairobi222/indy-catalyst", "src_encoding": "UTF-8", "text": "\"\"\"Indy ledger implementation.\"\"\"\n\nimport json\nimport logging\nimport tempfile\nfrom os import path\nimport re\n\nimport indy.anoncreds\nimport indy.ledger\nimport indy.pool\nfrom indy.error import IndyError, ErrorCode\n\nfrom .base import BaseLedger\nfrom .error import ClosedPoolError, LedgerTransactionError, DuplicateSchemaError\n\nGENESIS_TRANSACTION_PATH = tempfile.gettempdir()\nGENESIS_TRANSACTION_PATH = path.join(\n GENESIS_TRANSACTION_PATH, \"indy_genesis_transactions.txt\"\n)\n\n\nclass IndyLedger(BaseLedger):\n \"\"\"Indy ledger class.\"\"\"\n\n def __init__(self, name, wallet, genesis_transactions):\n \"\"\"\n Initialize an IndyLedger instance.\n\n Args:\n wallet: IndyWallet instance\n genesis_transactions: String of genesis transactions\n\n \"\"\"\n self.logger = logging.getLogger(__name__)\n\n self.name = name\n self.wallet = wallet\n self.pool_handle = None\n\n # TODO: ensure wallet type is indy\n\n # indy-sdk requires a file but it's only used once to bootstrap\n # the connection so we take a string instead of create a tmp file\n with open(GENESIS_TRANSACTION_PATH, \"w\") as genesis_file:\n genesis_file.write(genesis_transactions)\n\n async def __aenter__(self) -> \"IndyLedger\":\n \"\"\"\n Context manager entry.\n\n Returns:\n The current instance\n\n \"\"\"\n pool_config = json.dumps({\"genesis_txn\": GENESIS_TRANSACTION_PATH})\n\n # We only support proto ver 2\n await indy.pool.set_protocol_version(2)\n\n self.logger.debug(\"Creating pool ledger...\")\n try:\n await indy.pool.create_pool_ledger_config(self.name, pool_config)\n except IndyError as error:\n if error.error_code == ErrorCode.PoolLedgerConfigAlreadyExistsError:\n self.logger.debug(\"Pool ledger already created.\")\n else:\n raise\n\n # TODO: allow ledger config in init?\n self.pool_handle = await indy.pool.open_pool_ledger(self.name, \"{}\")\n return self\n\n async def __aexit__(self, exc_type, exc, tb):\n \"\"\"Context manager exit.\"\"\"\n await indy.pool.close_pool_ledger(self.pool_handle)\n self.pool_handle = None\n\n async def _submit(self, request_json: str, sign=True) -> str:\n \"\"\"\n Sign and submit request to ledger.\n\n Args:\n request_json: The json string to submit\n sign: whether or not to sign the request\n\n \"\"\"\n\n if not self.pool_handle:\n raise ClosedPoolError(\n \"Cannot sign 
and submit request to closed pool {}\".format(self.name)\n )\n\n public_did = await self.wallet.get_public_did()\n\n if sign:\n request_result_json = await indy.ledger.sign_and_submit_request(\n self.pool_handle, self.wallet.handle, public_did.did, request_json\n )\n else:\n request_result_json = await indy.ledger.submit_request(\n self.pool_handle, request_json\n )\n\n request_result = json.loads(request_result_json)\n\n operation = request_result.get(\"op\", \"\")\n\n # HACK: If only there were a better way to identify this kind\n # of rejected request...\n if (\n \"can have one and only one SCHEMA with name schema and version\"\n in request_result_json\n ):\n raise DuplicateSchemaError()\n\n if operation in (\"REQNACK\", \"REJECT\"):\n raise LedgerTransactionError(\n f\"Ledger rejected transaction request: {request_result['reason']}\"\n )\n\n elif operation == \"REPLY\":\n return request_result_json\n\n else:\n raise LedgerTransactionError(\n f\"Unexpected operation code from ledger: {operation}\"\n )\n\n async def send_schema(self, schema_name, schema_version, attribute_names: list):\n \"\"\"\n Send schema to ledger.\n\n Args:\n schema_name: The schema name\n schema_version: The schema version\n attribute_names: A list of schema attributes\n\n \"\"\"\n\n public_did = await self.wallet.get_public_did()\n\n schema_id, schema_json = await indy.anoncreds.issuer_create_schema(\n public_did.did, schema_name, schema_version, json.dumps(attribute_names)\n )\n\n request_json = await indy.ledger.build_schema_request(\n public_did.did, schema_json\n )\n\n try:\n await self._submit(request_json)\n except DuplicateSchemaError as e:\n self.logger.warn(\n \"Schema already exists on ledger. Returning ID. \" + f\"Error: {str(e)}\"\n )\n schema_id = f\"{public_did.did}:{2}:{schema_name}:{schema_version}\"\n\n return schema_id\n\n async def get_schema(self, schema_id):\n \"\"\"\n Get schema from ledger.\n\n Args:\n schema_id: The schema id to retrieve\n\n \"\"\"\n\n public_did = await self.wallet.get_public_did()\n\n request_json = await indy.ledger.build_get_schema_request(\n public_did.did, schema_id\n )\n\n response_json = await self._submit(request_json)\n _, parsed_schema_json = await indy.ledger.parse_get_schema_response(\n response_json\n )\n parsed_response = json.loads(parsed_schema_json)\n\n return parsed_response\n\n async def send_credential_definition(self, schema_id, tag=\"default\"):\n \"\"\"\n Send credential definition to ledger and store relevant key matter in wallet.\n\n Args:\n schema_id: The schema id of the schema to create cred def for\n tag: Option tag to distinguish multiple credential definitions\n\n \"\"\"\n\n public_did = await self.wallet.get_public_did()\n schema = await self.get_schema(schema_id)\n\n # TODO: add support for tag, sig type, and config\n try:\n (\n credential_definition_id,\n credential_definition_json,\n ) = await indy.anoncreds.issuer_create_and_store_credential_def(\n self.wallet.handle,\n public_did.did,\n json.dumps(schema),\n tag,\n \"CL\",\n json.dumps({\"support_revocation\": False}),\n )\n # If the cred def already exists in the wallet, we need some way of obtaining\n # that cred def id (from schema id passed) since we can now assume we can use\n # it in future operations.\n except IndyError as error:\n if error.error_code == ErrorCode.AnoncredsCredDefAlreadyExistsError:\n try:\n cred_def_id = re.search(r\"\\w*:\\d*:CL:\\d*:\\w*\", error.message).group(\n 0\n )\n return cred_def_id\n # The regex search failed so let the error bubble up\n except 
AttributeError:\n raise error\n else:\n raise\n\n request_json = await indy.ledger.build_cred_def_request(\n public_did.did, credential_definition_json\n )\n\n await self._submit(request_json)\n\n # TODO: validate response\n\n return credential_definition_id\n\n async def get_credential_definition(self, credential_definition_id):\n \"\"\"\n Get a credential definition from the ledger by id.\n\n Args:\n credential_definition_id: The schema id of the schema to create cred def for\n\n \"\"\"\n\n public_did = await self.wallet.get_public_did()\n\n request_json = await indy.ledger.build_get_cred_def_request(\n public_did.did, credential_definition_id\n )\n\n response_json = await self._submit(request_json)\n\n (\n _,\n parsed_credential_definition_json,\n ) = await indy.ledger.parse_get_cred_def_response(response_json)\n parsed_response = json.loads(parsed_credential_definition_json)\n\n return parsed_response\n" }, { "alpha_fraction": 0.7599999904632568, "alphanum_fraction": 0.7599999904632568, "avg_line_length": 49, "blob_id": "b406e27f6ca225d532743407e24fe128ca59982b", "content_id": "967e89c1ac1fc5667c0ffc08be7d366790cdf52e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 50, "license_type": "permissive", "max_line_length": 49, "num_lines": 1, "path": "/agent/demo/requirements.txt", "repo_name": "nairobi222/indy-catalyst", "src_encoding": "UTF-8", "text": "git+https://github.com/webpy/webpy.git#egg=web.py\n" }, { "alpha_fraction": 0.7176470756530762, "alphanum_fraction": 0.7176470756530762, "avg_line_length": 34.41666793823242, "blob_id": "acc0a0ebaa8abddd3e1bb4f96175e113de5786f8", "content_id": "fc98563fb88956631c9ae368c94fad81f97a16f2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 850, "license_type": "permissive", "max_line_length": 76, "num_lines": 24, "path": "/agent/indy_catalyst_agent/ledger/provider.py", "repo_name": "nairobi222/indy-catalyst", "src_encoding": "UTF-8", "text": "\"\"\"Default ledger provider classes.\"\"\"\n\nimport logging\n\nfrom ..classloader import ClassLoader\nfrom ..config.base import BaseProvider, BaseInjector, BaseSettings\nfrom ..wallet.base import BaseWallet\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass LedgerProvider(BaseProvider):\n \"\"\"Provider for the default ledger implementation.\"\"\"\n\n LEDGER_CLASSES = {\"indy\": \"indy_catalyst_agent.ledger.indy.IndyLedger\"}\n\n async def provide(self, settings: BaseSettings, injector: BaseInjector):\n \"\"\"Create and open the ledger instance.\"\"\"\n\n genesis_transactions = settings.get(\"ledger.genesis_transactions\")\n if genesis_transactions:\n wallet = await injector.inject(BaseWallet)\n IndyLedger = ClassLoader.load_class(self.LEDGER_CLASSES[\"indy\"])\n return IndyLedger(\"default\", wallet, genesis_transactions)\n" } ]
5
philpot/stanford-extraction-prep
https://github.com/philpot/stanford-extraction-prep
261eee865238380a04e93bfa3145da1ffd085480
c29d523c9c36403628535864f85d35c7b434b0f1
a84b934ae01933ac338102246615826989020c92
refs/heads/master
2021-01-10T06:17:09.948956
2015-11-10T20:03:43
2015-11-10T20:03:43
45,663,204
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5960127711296082, "alphanum_fraction": 0.6118738651275635, "avg_line_length": 31.95661163330078, "blob_id": "9725d10c3462241c9b1ef11c6d7f9d5b34978b96", "content_id": "93a7d0dd02188385b6bb5a6502f72ddcc1285f3a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15951, "license_type": "permissive", "max_line_length": 169, "num_lines": 484, "path": "/prep.py", "repo_name": "philpot/stanford-extraction-prep", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n### TEST WITH THIS NUMBER (818) 200-6476\n### TEST WITH THIS VERY COMMON NUMBER (415) 683-3245\n\ntry:\n from pyspark import SparkContext, SparkFiles\nexcept:\n print \"### NO PYSPARK\"\nimport sys\nimport os\nimport platform\nimport socket\nimport argparse\nimport json\nfrom itertools import izip, izip_longest, count\nimport time\nfrom datetime import timedelta\nfrom random import randint\n\n\n### from trollchar.py\n\ndef asList(x):\n if isinstance(x, list):\n return x\n else:\n return [x]\n\n### from util.py\n\ndef iterChunks(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return izip_longest(*args, fillvalue=fillvalue)\n\n### end from util.py\n# Sniff for execution environment\n\nlocation = \"hdfs\"\ntry:\n if \"avatar\" in platform.node():\n location = \"local\"\nexcept:\n pass\ntry:\n if \"avatar\" in socket.gethostname():\n location = \"local\"\nexcept:\n pass\nprint \"### location %s\" % location\n\n\nconfigDir = os.getcwd() if location==\"hdfs\" else os.path.join(os.path.dirname(__file__), \"data/config\")\ndef configPath(n):\n return os.path.join(configDir, n)\n\nbinDir = os.getcwd() if location==\"hdfs\" else os.path.join(os.path.dirname(__file__), \"bin\")\ndef binPath(n):\n return os.path.join(binDir, n)\n\n# Adapted from Dipsy's list from dig-aligment ht version 1.0\n\nsourceById = {\n\"1\":\t\"backpage\",\n\"2\":\t\"craigslist\",\n\"3\":\t\"classivox\",\n\"4\":\t\"myproviderguide\",\n\"5\":\t\"naughtyreviews\",\n\"6\":\t\"redbook\",\n\"7\":\t\"cityvibe\",\n\"8\":\t\"massagetroll\",\n\"9\":\t\"redbookforum\",\n\"10\": \"cityxguide\",\n\"11\": \"cityxguideforum\",\n\"12\": \"rubads\",\n\"13\": \"anunico\",\n\"14\": \"sipsap\",\n\"15\": \"escortsincollege\",\n\"16\": \"escortphonelist\",\n\"17\": \"eroticmugshots\",\n\"18\": \"escortadsxxx\",\n\"19\": \"escortsinca\",\n\"20\": \"escortsintheus\",\n\"21\": \"liveescortreviews\",\n\"22\": \"myproviderguideforum\",\n\"23\": \"usasexguide\",\n\"24\": \"theeroticreview\",\n\"25\": \"adultsearch\",\n\"26\": \"happymassage\",\n\"27\": \"utopiaguide\",\n\"28\": \"missing kids\",\n\"29\": \"alibaba\",\n\"30\": \"justlanded\",\n\"31\": \"gmdu\",\n\"32\": \"tradekey\",\n\"33\": \"manpowervacancy\",\n\"34\": \"gulfjobsbank\",\n\"35\": \"ec21\"\n}\n\nsourceByName = {\n\"backpage\":\t\"1\",\n\"craigslist\":\t\"2\",\n\"classivox\":\t\"3\",\n\"myproviderguide\":\t\"4\",\n\"naughtyreviews\":\t\"5\",\n\"redbook\":\t\"6\",\n\"cityvibe\":\t\"7\",\n\"massagetroll\":\t\"8\",\n\"redbookforum\":\t\"9\",\n\"cityxguide\": \"10\",\n\"cityxguideforum\": \"11\",\n\"rubads\": \"12\",\n\"anunico\": \"13\",\n\"sipsap\": \"14\",\n\"escortsincollege\": \"15\",\n\"escortphonelist\": \"16\",\n\"eroticmugshots\": \"17\",\n\"escortadsxxx\": \"18\",\n\"escortsinca\": \"19\",\n\"escortsintheus\": \"20\",\n\"liveescortreviews\": \"21\",\n\"myproviderguideforum\": \"22\",\n\"usasexguide\": \"23\",\n\"theeroticreview\": \"24\",\n\"adultsearch\": \"25\",\n\"happymassage\": \"26\",\n\"utopiaguide\": 
\"27\",\n\"missing kids\": \"28\",\n\"alibaba\": \"29\",\n\"justlanded\": \"30\",\n\"gmdu\": \"31\",\n\"tradekey\": \"32\",\n\"manpowervacancy\": \"33\",\n\"gulfjobsbank\": \"34\",\n\"ec21\": \"35\"\n}\n\n\"\"\"\nadultsearch\nbackpage\ncityvibe\ncityxguide\nclassivox\ncraigslist\nescortadsxxx\nescortphonelist\nescortsinca\nescortsincollege\nescortsintheus\nmassagetroll\nmyproviderguide\nredbook\nrubads\nsipsap\nusasexguide\nutopiaguide\n\"\"\"\n\n\"\"\"\nadultsearch\nbackpage\ncityvibe\ncityxguide\nclassivox\ncraigslist\nescortadsxxx\nescortphonelist\nescortsinca\nescortsincollege\nescortsintheus\nmassagetroll\nmyproviderguide\nredbook\nrubads\nsipsap\nusasexguide\nutopiaguide\n\"\"\"\n\ndef getSourceById(id):\n return sourceById.get(id, \"unknownsourceid_{}\".format(id))\n\ndef getSourceByName(name):\n return sourceByName.get(name, \"unknownsourcename_{}\".format(name))\n\ndef prep(sc, cdr, stanford, successOutput, failOutput,\n # how many prefix fields to drop from stanford input\n shift = 0,\n uriClass='Offer',\n # minimum initial number of partitions\n numPartitions=None, \n limit=None, \n debug=0, \n location='hdfs', \n outputFormat=\"text\",\n sampleSeed=1234,\n # Cheat means artificially limit to only those cases covering known number (415) 683-3245\n cheat=False):\n\n show = True if debug>=1 else False\n def showPartitioning(rdd):\n \"\"\"Seems to be significantly more expensive on cluster than locally\"\"\"\n if show:\n partitionCount = rdd.getNumPartitions()\n try:\n valueCount = rdd.countApprox(1000, confidence=0.50)\n except:\n valueCount = -1\n print (\"At %s, there are %d partitions with on average %s values\" % \n (rdd.name(), partitionCount, int(valueCount/float(partitionCount))))\n\n debugOutput = successOutput + '_debug'\n def debugDump(rdd,keys=True,listElements=False):\n showPartitioning(rdd)\n keys=False\n if debug >= 2:\n startTime = time.time()\n outdir = os.path.join(debugOutput, rdd.name() or \"anonymous-%d\" % randint(10000,99999))\n keyCount = None\n try:\n keyCount = rdd.keys().count() if keys else None\n except:\n pass\n rowCount = None\n try:\n rowCount = rdd.count()\n except:\n pass\n elementCount = None\n try:\n elementCount = rdd.mapValues(lambda x: len(x) if isinstance(x, (list, tuple)) else 0).values().sum() if listElements else None\n except:\n pass\n rdd.saveAsTextFile(outdir)\n endTime = time.time()\n elapsedTime = endTime - startTime\n print \"wrote [%s] to outdir %r: [%s, %s, %s]\" % (str(timedelta(seconds=elapsedTime)), outdir, keyCount, rowCount, elementCount)\n\n def showSizeAndExit(rdd):\n try:\n k = rdd.count()\n except:\n k = None\n print \"Just finished %s with size %s\" % (rdd.name(), k)\n exit(0)\n\n rdd_cdr = sc.textFile(cdr)\n if limit:\n # Because take/takeSample collects back to master, can create \"task too large\" condition\n # rdd_ingest = sc.parallelize(rdd_ingest.take(limit))\n # Instead, generate approximately 'limit' rows\n ratio = float(limit) / rdd_cdr.count()\n rdd_cdr = rdd_cdr.sample(False, ratio, seed=sampleSeed)\n rdd_cdr.setName('cdr')\n debugDump(rdd_cdr)\n \n def splitCdrLine(line):\n (url, jdata) = line.split('\\t')\n d = json.loads(jdata)\n # sid = d[\"_source\"][\"sid\"]\n sourceId = d[\"_source\"][\"sources_id\"]\n incomingId = d[\"_source\"][\"incoming_id\"]\n id = d[\"_source\"][\"id\"]\n url = d[\"_source\"][\"url\"]\n return ( (sourceId, incomingId), (id, url) )\n rdd_cdr_split = rdd_cdr.map(lambda line: splitCdrLine(line))\n rdd_cdr_split.setName('rdd_cdr_split')\n debugDump(rdd_cdr_split)\n\n rdd_cdr_sort = 
rdd_cdr_split.sortByKey()\n rdd_cdr_sort.setName('rdd_cdr_sort')\n debugDump(rdd_cdr_sort)\n\n rdd_stanford = sc.textFile(stanford)\n rdd_stanford.setName('rdd_stanford')\n if limit:\n # Because take/takeSample collects back to master, can create \"task too large\" condition\n # rdd_ingest = sc.parallelize(rdd_ingest.take(limit))\n # Instead, generate approximately 'limit' rows\n ratio = float(limit) / rdd_stanford.count()\n rdd_stanford = rdd_stanford.sample(False, ratio, seed=sampleSeed)\n\n # temp\n if cheat:\n rdd_stanford = rdd_stanford.filter(lambda line: \"(415) 683\" in line)\n\n rdd_stanford.setName('stanford')\n debugDump(rdd_stanford)\n\n\n def splitStanfordLine(line):\n fields = line.split('\\t')[shift:]\n sourceNameCrawlId, valuesExpr = fields\n (sourceName, crawlId) = sourceNameCrawlId.split(\":\")\n sourceId = getSourceByName(sourceName)\n try:\n sourceId = int(sourceId)\n except:\n pass\n try:\n crawlId = int(crawlId)\n except:\n pass\n values = valuesExpr.split(',')\n return ( (sourceId, crawlId), tuple(values) )\n\n rdd_stanford_split = rdd_stanford.map(lambda line: splitStanfordLine(line))\n rdd_stanford_split.setName('rdd_stanford_split')\n debugDump(rdd_stanford_split)\n\n rdd_stanford_sort = rdd_stanford_split.sortByKey()\n rdd_stanford_sort.setName('rdd_stanford_sort')\n debugDump(rdd_stanford_sort)\n\n # all stanford gets a CDR tag\n # rdd_net = rdd_stanford_sort.leftOuterJoin(rdd_cdr_sort)\n # elements look like\n # (sourceIdInt, crawlIdInt) => ( <stanfordValuesTuple>, <cdrValuesTuple> )\n # ((4, 129640), ((u'(415) 683-3245',), (346752, u'http://www.myproviderguide.com/escorts/san-francisco/free-posts/w4m/5686036_outcalls-only-silm-girl-korean.html')))\n # where <stanfordValuesTuple> can be None\n # where <cdrValuesTuple> can be None\n # this is what we might want\n # left outer join:\n # keep everything from stanford, even if no match from CDR\n rdd_net = rdd_stanford_sort.leftOuterJoin(rdd_cdr_sort)\n rdd_net.setName('rdd_net')\n debugDump(rdd_net)\n\n # successful are those where cdr ID is not None)\n rdd_success = rdd_net.filter(lambda r: r[1] and r[1][1] and r[1][1][0])\n print \"Success {} tuples\".format(rdd_success.count())\n rdd_success.setName('rdd_success')\n debugDump(rdd_success)\n\n rdd_fail = rdd_net.filter(lambda r: not (r[1] and r[1][1] and r[1][1][0]))\n print \"Fail {} tuples\".format(rdd_fail.count())\n\n def emitJson(r):\n (k, payload) = r\n (sourceId, crawlId) = k\n (stanfordValues, cdrValues) = payload\n adId = None\n url = None\n try:\n (adId, url) = cdrValues\n except:\n pass\n d = {}\n if sourceId:\n d[\"sourceId\"] = sourceId\n sourceName = None\n try:\n sourceName = getSourceById(str(sourceId))\n except:\n pass\n if sourceName:\n d[\"sourceName\"] = sourceName\n if crawlId:\n d[\"crawlId\"] = crawlId\n if adId:\n d[\"adId\"] = adId\n if url:\n d[\"url\"] = url\n for (value, idx) in izip(stanfordValues, count(1)):\n if value:\n d[\"stanfordExtraction{}\".format(idx)] = value\n return d\n\n def jsonRow(r):\n d = emitJson(r)\n return (d.get(\"url\", \"noUrlAvailable\"), json.dumps(d))\n\n # rdd_success_json = rdd_success.map(lambda r: (json.dumps(emitJson(r)))\n rdd_success_json = rdd_success.map(lambda r: jsonRow(r))\n\n if rdd_success_json.isEmpty():\n print \"### NO SUCCESS DATA TO WRITE\"\n else:\n if outputFormat == \"sequence\":\n rdd_success_json.saveAsSequenceFile(successOutput)\n elif outputFormat == \"text\":\n rdd_success_json.saveAsTextFile(successOutput)\n elif outputFormat == \"tsv\":\n # might not work anymore\n rdd_success = 
rdd_success_json.map(lambda (k,p): k + \"\\t\" + p[0] + \"\\t\" + p[1])\n rdd_success.saveAsTextFile(successOutput)\n elif outputFormat == \"newSequence\":\n # adapted from organizationPatentAndLegalActionAggregations\n # This does not work\n # apparently we can't write something that it thinks is an ArrayWritable as Text\n # wrong key class: org.apache.hadoop.io.ArrayWritable is not class org.apache.hadoop.io.Text\n # seems like a Converter class is needed, but can't find one that performs this conversion\n outputFormatClassName = \"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat\"\n config= {\"mapreduce.output.fileoutputformat.compress\": \"true\", \n \"mapreduce.output.fileoutputformat.compress.codec\": \"org.apache.hadoop.io.compress.DefaultCodec\",\n \"mapreduce.output.fileoutputformat.compress.type\": \"RECORD\"}\n rdd_success.saveAsNewAPIHadoopFile(successOutput, outputFormatClassName,\n \"org.apache.hadoop.io.Text\", \"org.apache.hadoop.io.Text\",\n None, None, config)\n else:\n raise RuntimeError(\"Unrecognized output format: %s\" % outputFormat)\n\n # rdd_fail_json = rdd_fail.map(lambda r: json.dumps(emitJson(r)))\n rdd_fail_json = rdd_fail.map(lambda r: jsonRow(r))\n\n if rdd_fail_json.isEmpty():\n print \"### NO FAIL DATA TO WRITE\"\n else:\n if outputFormat == \"sequence\":\n rdd_fail_json.saveAsSequenceFile(failOutput)\n elif outputFormat == \"text\":\n rdd_fail_json.saveAsTextFile(failOutput)\n elif outputFormat == \"tsv\":\n # might not work anymore\n rdd_fail = rdd_fail_json.map(lambda (k,p): k + \"\\t\" + p[0] + \"\\t\" + p[1])\n rdd_fail.saveAsTextFile(failOutput)\n elif outputFormat == \"newSequence\":\n # adapted from organizationPatentAndLegalActionAggregations\n outputFormatClassName = \"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat\"\n config= {\"mapreduce.output.fileoutputformat.compress\": \"true\", \n \"mapreduce.output.fileoutputformat.compress.codec\": \"org.apache.hadoop.io.compress.DefaultCodec\",\n \"mapreduce.output.fileoutputformat.compress.type\": \"RECORD\"}\n rdd_fail.saveAsNewAPIHadoopFile(failOutput, outputFormatClassName,\n \"org.apache.hadoop.io.Text\", \"org.apache.hadoop.io.Text\",\n None, None, config)\n else:\n raise RuntimeError(\"Unrecognized output format: %s\" % outputFormat)\n\ndef main(argv=None):\n '''this is called if run from command line'''\n # pprint.pprint(sorted(os.listdir(os.getcwd())))\n parser = argparse.ArgumentParser()\n # parser.add_argument('-c','--cdr', default='data/in/cdr/1ht.json')\n # parser.add_argument('-c','--cdr', default='data/in/cdr/1000ht.json')\n # parser.add_argument('-c','--cdr', default='data/in/cdr/250k_ht.json')\n parser.add_argument('-c','--cdr', default='data/in/cdr/fake.json')\n # parser.add_argument('-s','--stanford', default='data/in/stanford/phone_numbers.tsv')\n parser.add_argument('-s','--stanford', default='data/in/stanford/phone_numbers2.tsv')\n parser.add_argument('-g','--success', required=True)\n parser.add_argument('-f','--fail', required=True)\n parser.add_argument('-k','--shift', default=0, type=int)\n parser.add_argument('-u','--uriClass', default='Offer')\n parser.add_argument('-p','--numPartitions', required=False, default=None, type=int,\n help='minimum initial number of partitions')\n parser.add_argument('-n','--name', required=False, default=\"\", help='Added to name of spark job, for debugging')\n parser.add_argument('-l','--limit', required=False, default=None, type=int)\n parser.add_argument('-v','--verbose', required=False, help='verbose', 
action='store_true')\n parser.add_argument('-z','--debug', required=False, help='debug', type=int)\n parser.add_argument('-x','--cheat', required=False, help='cheat', action='store_true')\n parser.add_argument('-y','--outputFormat', default='sequence')\n args=parser.parse_args()\n\n # might become an option\n outputFormat = 'sequence'\n outputFormat = 'text'\n outputFormat = args.outputFormat\n\n if not args.numPartitions:\n if location == \"local\":\n args.numPartitions = 3\n elif location == \"hdfs\":\n args.numPartitions = 50\n\n sparkName = \"prep\"\n if args.name:\n sparkName = sparkName + \" \" + str(args.name)\n\n sc = SparkContext(appName=sparkName)\n prep(sc, args.cdr, args.stanford,\n args.success,\n args.fail,\n uriClass=args.uriClass,\n numPartitions=args.numPartitions,\n limit=args.limit,\n debug=args.debug,\n outputFormat=outputFormat,\n shift=args.shift,\n location=location,\n cheat=args.cheat)\n\n# call main() if this is run as standalone\nif __name__ == \"__main__\":\n sys.exit(main())\n" }, { "alpha_fraction": 0.7224959135055542, "alphanum_fraction": 0.7743842601776123, "avg_line_length": 32.10869598388672, "blob_id": "e7ecf674a8572d1d077b9ade826d0ae12e9514e9", "content_id": "5c349af5de351d7d53e17003f171e6cc2924640d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3045, "license_type": "permissive", "max_line_length": 139, "num_lines": 92, "path": "/README.md", "repo_name": "philpot/stanford-extraction-prep", "src_encoding": "UTF-8", "text": "This module converts stanford extractions to sequence files to be modeled by Karma.\n\nphilpot 10 November 2015\n\nStanford generates tab-separated data rows that look like this:\n\n<sourceName>:<crawlId>\\t<payload>\n\n(phone, service, ismassageparlor)\n\ne.g.\n\nadultsearch:10024 (856) 676-4184\nadultsearch:10025 (347) 460-7854\n\nor\n\n<auxID>\\t<sourceName>:<crawlId>\\t<payload>\n\n(rates, email)\n\ne.g.\n\nrates_7598613 adultsearch:112 200,30 MINS\nrates_7605289 adultsearch:11096 150,1 HOUR\n\nWe ignore the auxID. Also see discussion of -k/--shift below.\n\nThere are Stanford extractions named movement and organized. These\nhave no payload, merely the <sourceName>:<crawlId> pair. Presumably\nthis can be used to create groups/clusters. We are ignoring these for\nnow.\n\nIn some cases, the payload is really two related values separated by comma.\n\nThis spark job joins the <sourceName>:<crawlId> in the indicated input\nfile with a selected CDR-derived sequence file. The CDR sequence data\nrows are expected to be <url>\\t<json>. The JSON must have fields:\n\n_source.sources_id\n_source.incoming_id\n_source.id\n\nand should have field\n\n _source.url\n\nThe sources_id and sourceName fields are 1:1 and a mapping table to\nthis effect generated by Dipsy from dig-alignment/ht version 1 was\nadapted for performing the mapping. The incoming_id is referred to\nabove as crawlId. The id is the output database id. The url field\nextracted from the payload should be equally available as the tsv key\nfield.\n\nThe Spark job performs an left outer join from the stanford data to\nthe CDR data. This means that any Stanford record which does not\nmatch a CDR record will be assigned NULL as CDR data. 
Matching\nrecords are deemed success cases and any NULL records are deemed\nfailure cases but are retained.\n\nCommand line arguments\n\n-h: help\n-v: verbose\n-c: location of CDR data\n-s: location of Stanford data\n-g: directory to write successful data (conventional output)\n-f: directory to write failure data (join misses)\n-k: number of columns to shift left. Typically 0, set to 1 to drop the <auxId> field from rates, email.\n-u: ignored\n-p: how many Spark partitions to assign\n-n: can be set to the name of the task for tracking/distinguishing calls between running jobs\n-l: for debugging: consider only approximately this number of records\n-z: for debugging: if 2, spill rdd contents to named file at each rdd computed; if 1, dump only partition information; if 0, no information\n-x: for debugging: focus on a single known record only\n-y: outputFormat: may be text, sequence, tsv, or newSequence (= compressed sequence file using new Hadoop API: under development)\n\n\nCounts of stanford extractions (escort directory)\n 433922 email_addresses.tsv\n 29907390 ismassageparlorad.tsv\n 13854857 phone_numbers.tsv\n 7598570 rates.tsv\n 29907390 service.tsv\n\nEstimated yield when joining with ht20mil\n\nemail: 46% or roughly 199600\nmassage: 53% or roughly 15850000\nphone numbers: 55% or roughly 7620000\nrate: 52% or roughly 3951000\nservice (incall/outcall/etc.): 16449000" }, { "alpha_fraction": 0.6833977103233337, "alphanum_fraction": 0.7065637111663818, "avg_line_length": 31.25, "blob_id": "176319cd02570752ae0c11a4fa5d579d4f8fe215", "content_id": "637e6096a699811b777785d526b3c0f01de8ead3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 259, "license_type": "permissive", "max_line_length": 97, "num_lines": 8, "path": "/prep.sh", "repo_name": "philpot/stanford-extraction-prep", "src_encoding": "UTF-8", "text": "/usr/lib/spark/bin/spark-submit \\\n--master yarn-client \\\n--driver-memory 8g \\\n--executor-memory 80G --executor-cores 5 \\\n--num-executors 20 \\\n--conf \"spark.executor.extraJavaOptions=-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps\" \\\n prep.py \\\n $@\n\n" } ]
3
WesleyW-code/First-Capstone-Project-Level-1-Python
https://github.com/WesleyW-code/First-Capstone-Project-Level-1-Python
8645e852f4a3b2cea796395c2f0de967ebad7b2b
b05881f1fd2f8738aa1e46164be79f6ad3e63704
7f8cce027fa06fe1b0a3df4bf13f1f468be9192e
refs/heads/main
2023-04-14T04:26:27.542665
2021-05-05T11:17:33
2021-05-05T11:17:33
364,540,431
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6948952674865723, "alphanum_fraction": 0.6999114751815796, "avg_line_length": 76.48837280273438, "blob_id": "722a4edd142105b64ee5fd1c68bc65b600470364", "content_id": "dad4f091d0fbe447479979a2339194254fccc135", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3389, "license_type": "no_license", "max_line_length": 271, "num_lines": 43, "path": "/Finance calculators.py", "repo_name": "WesleyW-code/First-Capstone-Project-Level-1-Python", "src_encoding": "UTF-8", "text": "# Capstone project\r\n\r\n# Importing math module:\r\n\r\nimport math\r\n\r\n# Allowing user to chose between investment or bond:\r\n# Printing the choices for the user and giving description of each choice:\r\n\r\nchoice = input(\"Choose either 'investment' or 'bond' from the menu below to proceed:\\n \\ninvestment - to calculate the amount of interest you'll earn on interest\\nbond - to calculate the amount you'll have to pay on a home loan\\n\\nPlease enter your finance option now: \")\r\n\r\n# Displaying error message if the user does not enter the selection correctly:\r\n\r\nif choice != \"investment\" and choice != \"Investment\" and choice != \"INVESTMENT\" and choice != \"Bond\" and choice != \"bond\" and choice != \"BOND\":\r\n print(\"Invalid entry - Please make sure you have spelled your option correctly, only use capitals at the begining or throughout your inputed option.\")\r\n \r\n# If user selects the investment option this formula will run:\r\n\r\nelif choice == \"investment\" or choice == \"Investment\" or choice == \"INVESTMENT\":\r\n money = float(input(\"\\nEnter the amount of money you want to deposit: \")) # Ask the user the amount of money that they are depositing.\r\n rate = float(input(\"Enter the percentage interest rate you will be receiving (only the number not the percentage symbol): \")) # Ask the user to enter the interest rate (just the number).\r\n years = float(input(\"Enter the number of years you plan to invest for: \")) # Ask the user the number of years they plan to invest.\r\n interest = input(\"Do you want 'simple' or 'compound' interest: (Enter simple or compound) \") # Ask user wether they want simple or compound interest.\r\n conv_rte = rate / 100\r\n if interest == \"simple\":\r\n answer = money*(1 + conv_rte * years) # Formulate the answer.\r\n print(\"\\nThe amount you will receive after\",years,\"years, with a simple interest rate of\",rate,\"percent is: R\",round(answer,2)) # Print out the answer.\r\n elif interest == \"compound\":\r\n answer = money * math.pow((1 + conv_rte),years) # Formulate the answer.\r\n print(\"\\nThe amount you will receive after\",years,\"years, with a compound interest rate of\",rate,\"percent is: R\",round(answer,2)) # Print out the answer.\r\n else:\r\n print(\"The type of interest has not been selected or typed in correctly!\") # Print out error message if 'simple' or 'compound' has not been entered correctly.\r\n\r\n# If user selects the bond option this formula will run:\r\n\r\nelse:\r\n choice == \"Bond\" or choice == \"bond\" or choice == \"BOND\"\r\n value = float(input(\"\\nPlease enter the present value of the house: \")) # Ask the user the present value of the house.\r\n int_rte = float(input(\"Please enter the interest rate (only the number not the percentage symbol): \")) # Ask the user to enter the interest rate (just the number).\r\n num_mnths = float(input(\"Please enter the number of months you plan to take to repay the bond: \")) # Ask the user over how 
many months they want to repay the bond.\r\n conv_rte = (int_rte / 100)/12 # Converting the interest rate to monthly by dividing the annual interest rate by 12.\r\n answer = (conv_rte * value)/(1 - (1 + conv_rte)**(- num_mnths)) # Calculates how much money the user will have to repay each month. \r\n print(\"\\nYou will have to pay R\",round(answer,2),\"every month for the next\",num_mnths,\"months to pay off your bond.\") # Outputs the answer.\r\n\r\n\r\n\r\n\r\n \r\n" }, { "alpha_fraction": 0.7830508351325989, "alphanum_fraction": 0.7830508351325989, "avg_line_length": 54.3125, "blob_id": "c9b05eb3b6a3f642c75dc3d7c273c7284e465cb2", "content_id": "0f2dfe630818560a11f6e8e0e99fb1052130d59a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 885, "license_type": "no_license", "max_line_length": 168, "num_lines": 16, "path": "/README for Financial calculator.md", "repo_name": "WesleyW-code/First-Capstone-Project-Level-1-Python", "src_encoding": "UTF-8", "text": "# First-Capstone-Project-Financial calculator:\n\nIn this project i created a program that allows a user to choose between an investment or bond. \n\nIf they select Investment it will:\n- Prompt to enter the amount they want to invest.\n- Prompt to enter the interest rate received for the investment.\n- Prompt to enter the amount of years they want to invest for.\n- Choose between compound or simple interest.\n- According to what they choose it will calculate the total that the investment has grown to after the amount of years ,with the interest rate and the type of interest.\n\nIf they select Bond it will:\n- Prompt to input the amount that the house is worth.\n- Then enter the interest rate.\n- Then enter over how long it will take for the bond to be paid.\n- According to this information it will calculate how much to pay each month to finish paying off your bond in the required time.\n" } ]
2
libraryhackers/library-callnumber-lc
https://github.com/libraryhackers/library-callnumber-lc
295a2195df415131546650fc396bd6c143f882f6
5ce1d7ab5cca7533bad406f37cc3e718ac37c0fb
eed3f0cffd7f9bd1dfe3771d92b89262e22f7343
refs/heads/master
2021-05-16T03:13:31.315091
2014-12-12T17:46:05
2014-12-12T18:04:16
32,171,460
35
4
null
2015-03-13T17:48:03
2016-09-21T17:23:03
2015-03-13T17:51:42
Perl
[ { "alpha_fraction": 0.5033987760543823, "alphanum_fraction": 0.5917673707008362, "avg_line_length": 33.389610290527344, "blob_id": "8e4519e35e557b1008b247bc1b1ee39604dc9f72", "content_id": "3febccbf1a02fc29e9f410b7cbf49ec7439b0d94", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2648, "license_type": "permissive", "max_line_length": 78, "num_lines": 77, "path": "/python/test/main.py", "repo_name": "libraryhackers/library-callnumber-lc", "src_encoding": "UTF-8", "text": "import unittest\nimport callnumber\n\n\nlccns = {\n 'HE8700.7 .P6T44 1983': ['HE', '8700.7', '.P6', 'T44', '1983'],\n 'BS2545.E8 H39 1996': ['BS', '2545', '.E8', 'H39', '1996'],\n 'NX512.S85 A4 2006': ['NX', '512', '.S85', 'A4', '2006'],\n}\n\nlccns_with_blanks = {\n 'HE8700.7 .P6T44 1983': ['HE', '8700.7', '.P6', 'T44', '', '1983'],\n 'BS2545.E8 1996': ['BS', '2545', '.E8', '', '', '1996'],\n 'NX512.S85 A4': ['NX', '512', '.S85', 'A4', '', ''],\n}\n\n\nclass CallNumberTest(unittest.TestCase):\n\n def test_00_simple_normalization(self):\n lccn = callnumber.LC('A')\n self.assertTrue(lccn.denormalized, 'A')\n self.assertTrue(lccn.normalized, 'A')\n\n def test_01_compound_normalization(self):\n lccn = callnumber.LC('A11.1')\n self.assertTrue(lccn.denormalized, 'A11.1')\n self.assertTrue(lccn.normalized, 'A 001110')\n\n def test_02_normalize_module_method(self):\n self.assertTrue(callnumber.normalize('B11'), 'B 0011')\n\n def test_03_module_method_with_cutters(self):\n self.assertTrue(callnumber.normalize('A 123.4 .c11'), 'A 012340C110')\n self.assertTrue(callnumber.normalize('B11 .c13 .d11'),\n 'B 001100C130D110')\n self.assertTrue(callnumber.normalize('B11 .c13 .d11'),\n 'B 001100C130D119~999')\n\n def test_04_simple_range(self):\n lccn = callnumber.LC('A')\n self.assertTrue(lccn.range_start, 'A')\n self.assertTrue(lccn.range_end, 'A~~')\n\n def test_05_compound_range(self):\n lccn = callnumber.LC('A11.1')\n self.assertTrue(lccn.range_start, 'A 001110')\n self.assertTrue(lccn.range_end, 'A 001119~999~999~999')\n\n def test_06_start_of_range_equivalence(self):\n for lccn in lccns:\n lccn = callnumber.LC(lccn)\n self.assertTrue(lccn.normalized, lccn.range_start)\n\n def test_07_components_no_blanks(self):\n for lccn in lccns:\n expected = lccns[lccn]\n comps = callnumber.LC(lccn).components()\n self.assertTrue(lccn)\n self.assertEqual(len(expected), len(comps))\n self.assertEqual(expected, comps)\n\n def test_08_components_no_blanks(self):\n for lccn in lccns_with_blanks:\n expected = lccns_with_blanks[lccn]\n comps = callnumber.LC(lccn).components(include_blanks=True)\n self.assertTrue(lccn)\n self.assertEqual(len(expected), len(comps))\n self.assertEqual(expected, comps)\n\n\ndef suite():\n test_suite = unittest.makeSuite(CallNumberTest, 'test')\n return test_suite\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.7184466123580933, "alphanum_fraction": 0.7184466123580933, "avg_line_length": 17.727272033691406, "blob_id": "f98de442d3553601b48d7f9c8b3b7c8dd5ab9548", "content_id": "1e116812384269d7d3ed874b07df9bd32eb08574", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 206, "license_type": "permissive", "max_line_length": 37, "num_lines": 11, "path": "/python/test.py", "repo_name": "libraryhackers/library-callnumber-lc", "src_encoding": "UTF-8", "text": "import unittest\nfrom test import main\n\n\ndef suite():\n test_suite = unittest.TestSuite()\n 
test_suite.addTest(main.suite())\n return test_suite\n\nrunner = unittest.TextTestRunner()\nrunner.run(suite())\n" }, { "alpha_fraction": 0.6547192335128784, "alphanum_fraction": 0.6594982147216797, "avg_line_length": 32.47999954223633, "blob_id": "dad8f16315b7342c8f05a48eea72237267a8a5bc", "content_id": "da210dbfbd3176c7c32cfbf2be5dc6be2b5f5941", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 837, "license_type": "permissive", "max_line_length": 79, "num_lines": 25, "path": "/python/setup.py", "repo_name": "libraryhackers/library-callnumber-lc", "src_encoding": "UTF-8", "text": "from setuptools import setup\n\nclassifiers = \"\"\"\n Intended Audience :: Developers\n Intended Audience :: Information Technology\n License :: OSI Approved :: MIT License\n Programming Language :: Python\n Development Status :: 4 - Beta\n Topic :: Text Processing :: General\n Topic :: Utilities\n\"\"\"\n\nsetup(\n name = 'callnumber',\n description = '''normalize Library of Congress call numbers and create\nranges of call numbers''',\n version = '0.1.0', # remember to update callnumber/__init__.py on release!\n url = 'http://code.google.com/p/library-callnumber-lc/',\n author = 'Michael J. Giarlo',\n author_email = '[email protected]',\n license = 'http://www.opensource.org/licenses/mit-license.php',\n packages = ['callnumber'],\n test_suite = 'test',\n classifiers = [c.strip() for c in classifiers.splitlines() if c],\n)\n" }, { "alpha_fraction": 0.4528387188911438, "alphanum_fraction": 0.47206515073776245, "avg_line_length": 24.853801727294922, "blob_id": "707cf923b9d129021d782c54c77e0b24cae27c3a", "content_id": "29ceb5b0f4e1730d0148fd2da4f13f1cdea25c89", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4421, "license_type": "permissive", "max_line_length": 66, "num_lines": 171, "path": "/python/callnumber/__init__.py", "repo_name": "libraryhackers/library-callnumber-lc", "src_encoding": "UTF-8", "text": "import re\n\n__version__ = '0.1.0'\n\njoiner = ''\ntopspace = ' '\nbottomspace = '~'\ntopdigit = '0'\nbottomdigit = '9'\nweird_re = re.compile(r'^\\s*[A-Z]+\\s*\\d+\\.\\d+\\.\\d+')\nlccn_re = re.compile(r'''^\n \\s*\n (?:VIDEO-D)? # for video stuff\n (?:DVD-ROM)? # DVDs, obviously\n (?:CD-ROM)? # CDs\n (?:TAPE-C)? # Tapes\n \\s*\n ([A-Z]{1,3}) # alpha\n \\s*\n (?: # optional numbers with optional decimal point\n (\\d+)\n (?:\\s*?\\.\\s*?(\\d+))?\n )?\n \\s*\n (?: # optional cutter\n \\.? \\s*\n ([A-Z]) # cutter letter\n \\s*\n (\\d+ | \\Z) # cutter numbers\n )?\n \\s*\n (?: # optional cutter\n \\.? \\s*\n ([A-Z]) # cutter letter\n \\s*\n (\\d+ | \\Z) # cutter numbers\n )?\n \\s*\n (?: # optional cutter\n \\.? \\s*\n ([A-Z]) # cutter letter\n \\s*\n (\\d+ | \\Z) # cutter numbers\n )?\n (\\s+.+?)? 
# everthing else\n \\s*$\n ''', re.VERBOSE)\n\n\ndef normalize(lc, bottom=False):\n lc = lc.upper()\n bottomout = bottom\n\n if re.match(weird_re, lc):\n return None\n\n m = re.match(lccn_re, lc)\n if not m:\n return None\n\n origs = m.groups('')\n (alpha, num, dec, c1alpha, c1num,\n c2alpha, c2num, c3alpha, c3num, extra) = origs\n\n if (len(dec) > 2):\n return None\n\n if alpha and not (num or dec or c1alpha or c1num or c2alpha \\\n or c2num or c3alpha or c3num):\n if extra:\n return None\n if bottomout:\n return alpha + bottomspace * (3 - len(alpha))\n return alpha\n\n enorm = re.sub(r'[^A-Z0-9]', '', extra)\n num = '%04d' % int(num)\n\n topnorm = [\n alpha + topspace * (3 - len(alpha)),\n num + topdigit * (4 - len(num)),\n dec + topdigit * (2 - len(dec)),\n c1alpha if c1alpha else topspace,\n c1num + topdigit * (3 - len(c1num)),\n c2alpha if c2alpha else topspace,\n c2num + topdigit * (3 - len(c2num)),\n c3alpha if c3alpha else topspace,\n c3num + topdigit * (3 - len(c3num)),\n ' ' + enorm,\n ]\n\n bottomnorm = [\n alpha + bottomspace * (3 - len(alpha)),\n num + bottomdigit * (4 - len(num)),\n dec + bottomdigit * (2 - len(dec)),\n c1alpha if c1alpha else bottomspace,\n c1num + bottomdigit * (3 - len(c1num)),\n c2alpha if c2alpha else bottomspace,\n c2num + bottomdigit * (3 - len(c2num)),\n c3alpha if c3alpha else bottomspace,\n c3num + bottomdigit * (3 - len(c3num)),\n ' ' + enorm,\n ]\n\n if extra:\n return joiner.join(topnorm)\n\n topnorm.pop()\n bottomnorm.pop()\n\n inds = range(1, 9)\n inds.reverse()\n for i in inds:\n end = topnorm.pop()\n if origs[i]:\n if bottomout:\n end = joiner.join(bottomnorm[i:])\n return joiner.join(topnorm) + joiner + end\n\n\nclass LC(object):\n\n def __init__(self, callno):\n try:\n self.denormalized = callno.upper()\n except AttributeError:\n print \"*** ERROR: '%s' not a string?\" % (callno)\n self.normalized = normalize(callno)\n\n def __unicode__(self):\n return self.normalized\n\n def __str__(self):\n return self.normalized\n\n @property\n def range_start(self):\n return self.normalized\n\n @property\n def range_end(self):\n return normalize(self.denormalized, True)\n\n def components(self, include_blanks=False):\n if re.match(weird_re, self.denormalized):\n return None\n\n m = re.match(lccn_re, self.denormalized)\n if not m:\n return None\n\n (alpha, num, dec, c1alpha, c1num, c2alpha, c2num,\n c3alpha, c3num, extra) = m.groups('')\n\n if dec:\n num += '.%s' % dec\n\n c1 = ''.join((c1alpha, c1num))\n c2 = ''.join((c2alpha, c2num))\n c3 = ''.join((c3alpha, c3num))\n\n if re.search(r'\\S', c1):\n c1 = '.%s' % c1\n\n comps = []\n for comp in (alpha, num, c1, c2, c3, extra):\n if not re.search(r'\\S', comp) and not include_blanks:\n continue\n comp = re.match(r'^\\s*(.*?)\\s*$', comp).group(1)\n comps.append(comp)\n return comps\n" } ]
4
BartekCITSystems/Test
https://github.com/BartekCITSystems/Test
1c7bf8a4dda9851fa410566a684977ab8a95d145
5bba69fc412827b1a8e6136864aa8f32485c6ba9
30780d9079aba0ff434b7af49e11b4555d145e79
refs/heads/master
2018-04-13T11:06:47.346264
2017-02-05T18:26:25
2017-02-05T18:26:25
81,009,946
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8305084705352783, "alphanum_fraction": 0.8983050584793091, "avg_line_length": 58, "blob_id": "bbcb581ad9830f3e6b3829e0e4387927b4ffdb61", "content_id": "009bb7cce397b274e7ca8276b4d5ca46105d8d37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 59, "license_type": "no_license", "max_line_length": 58, "num_lines": 1, "path": "/Index.py", "repo_name": "BartekCITSystems/Test", "src_encoding": "UTF-8", "text": "print('Index 1235erdetdstssssssssssssssssssssssssssssssr')\n" } ]
1
ojarva/hsl-poikkeusinfo
https://github.com/ojarva/hsl-poikkeusinfo
5800a32cc30318309343bbdbb96958ba125b2a90
63b2cf9a81c2f864d75e13b2c02a6f4cfa4741a6
880ba1a51b493a5d4531428fdc35bd71c65758cf
refs/heads/master
2023-05-24T20:44:53.629547
2023-03-28T06:19:47
2023-03-28T06:19:47
52,455,469
0
0
MIT
2016-02-24T16:08:00
2023-03-28T06:19:53
2023-05-23T00:51:20
Python
[ { "alpha_fraction": 0.4262295067310333, "alphanum_fraction": 0.688524603843689, "avg_line_length": 14.25, "blob_id": "36107cf2c50dece361303cf1cfec0fdd4ac815c3", "content_id": "33ed30e01c85f6e6800d522046ca159b981e71d5", "detected_licenses": [ "MIT", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 61, "license_type": "permissive", "max_line_length": 17, "num_lines": 4, "path": "/requirements.txt", "repo_name": "ojarva/hsl-poikkeusinfo", "src_encoding": "UTF-8", "text": "redis==4.5.3\nrequests==2.25.1\nxmltodict==0.10.1\npytz>=2020.5\n" }, { "alpha_fraction": 0.38780805468559265, "alphanum_fraction": 0.4163424074649811, "avg_line_length": 19.83783721923828, "blob_id": "9edcc1f9287ca4c231d51a1cd95257fb313c8c68", "content_id": "906f9a45e9de1966fc4fd9176bf63d79e0e7fb37", "detected_licenses": [ "MIT", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 771, "license_type": "permissive", "max_line_length": 39, "num_lines": 37, "path": "/local_settings.py", "repo_name": "ojarva/hsl-poikkeusinfo", "src_encoding": "UTF-8", "text": "\"\"\"\nSettings for poikkeusinfo.py\n\"\"\"\n\nLINES = {\n \"6(T)\": {\n \"line_type\": \"tram\",\n \"numbers\": [\"6\", \"6T\"],\n \"directions\": [\"to_centrum\"],\n },\n \"7B\": {\n \"line_type\": \"tram\",\n \"numbers\": [\"7B\"],\n \"directions\": [\"from_centrum\"],\n },\n \"7A\": {\n \"line_type\": \"tram\",\n \"numbers\": [\"7A\"],\n \"directions\": [\"to_centrum\"],\n },\n \"metro\": {\n \"line_type\": \"metro\",\n \"directions\": [\"to_centrum\"],\n },\n \"64\": {\n \"directions\": [\"from_centrum\"],\n \"line_type\": \"helsinki\",\n \"numbers\": [\"64\"]\n },\n \"65A/66A\": {\n \"directions\": [\"to_centrum\"],\n \"line_type\": \"helsinki\",\n \"numbers\": [\"65A\", \"66A\"],\n }\n}\n\nFETCH_INTERVAL = 60 * 3 # seconds\n" }, { "alpha_fraction": 0.7471910119056702, "alphanum_fraction": 0.75, "avg_line_length": 26.461538314819336, "blob_id": "00cdac2c3166112b78cf4666b144e428dddc85c9", "content_id": "5032ee2b3366114741151a2d197f3a30b1a50cdc", "detected_licenses": [ "MIT", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 356, "license_type": "permissive", "max_line_length": 132, "num_lines": 13, "path": "/README.md", "repo_name": "ojarva/hsl-poikkeusinfo", "src_encoding": "UTF-8", "text": "HSL Poikkeusinfo\n================\n\nThis small program fetches traffic exceptions for greater Helsinki area public transportation.\n\nA local redis instance is assumed for publishing changes.\n\nTo run, a) setup a redis instance, b) install dependencies (pip install -r requirements.txt) and run with `python3 poikkeusinfo.py`.\n\nLicense\n-------\n\nSee LICENSE.txt" }, { "alpha_fraction": 0.5495738983154297, "alphanum_fraction": 0.5612094402313232, "avg_line_length": 34.3739128112793, "blob_id": "548e289b7bd37cd8ac858544c304a716e6c11850", "content_id": "ca34415ab8b9955ddc88891e2b86ac967102090a", "detected_licenses": [ "MIT", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12261, "license_type": "permissive", "max_line_length": 189, "num_lines": 345, "path": "/poikkeusinfo.py", "repo_name": "ojarva/hsl-poikkeusinfo", "src_encoding": "UTF-8", "text": "# encoding=utf-8\n\n\"\"\"\nClasses for downloading, parsing and filtering poikkeusinfo.fi xml 
files.\n\"\"\"\n\nfrom local_settings import LINES, FETCH_INTERVAL\nimport datetime\nimport glob\nimport json\nimport pprint\nimport re\nimport redis\nimport requests\nimport time\nimport xmltodict\nimport pytz\nimport logging\n\n\nclass DateTimeEncoder(json.JSONEncoder):\n \"\"\" Encodes items with datetime objects properly \"\"\"\n\n def default(self, o):\n if isinstance(o, datetime.datetime):\n return o.isoformat()\n\n return json.JSONEncoder.default(self, o)\n\n\nclass PoikkeusInfoParser(object):\n \"\"\" Parses poikkeusinfo XML.\n\n See http://developer.reittiopas.fi/media/Poikkeusinfo_XML_rajapinta_V2_2_01.pdf\n \"\"\"\n\n # Different time formats for estimated lengths\n TIME_RE = [\n re.compile(r\"^(?P<start_time>([0-9]{1,2}:[0-9]{2})|([0-9]{1,2}))\\s*-\\s*(?P<end_date>[0-9]{1,2}\\.[0-9]{2})\\.{0,1}\\s*(klo|kello)\\.*\\s*(?P<end_time>([0-9]{2}:[0-9]{2})|([0-9]{1,2}))\"),\n re.compile(r\"^(?P<end_time>([0-9]{1,2}:[0-9]{2})|([0-9]{1,2}))\\s*(asti|)(\\.|)$\"),\n re.compile(r\"^(?P<start_time>([0-9]{1,2}:[0-9]{2})|([0-9]{1,2}))\\s*-\\s*(?P<end_time>([0-9]{1,2}:[0-9]{2})|([0-9]{1,2}))\"),\n ]\n\n # Formats for estimated length dates\n DATE_FORMATS = [\n \"%d.%m\",\n \"%d.%m.\",\n \"%d.%m.%Y\",\n \"%d..%m\",\n \"%d..%m.%Y\",\n ]\n\n # Formats for estimated length times\n TIME_FORMATS = [\n \"%H:%M\",\n \"%H\",\n ]\n\n # Mapping for notification types (see pdf)\n TYPE_MAP = {\n \"1\": \"advance_info\",\n \"2\": \"urgent_info\",\n }\n\n # Mapping for notification sources (see pdf)\n SOURCE_MAP = {\n \"1\": \"manual\", # manually entered\n \"2\": \"automatic\", # automatically imported from other HSL systems\n }\n\n # Mapping for line types (see pdf)\n LINETYPE_MAP = {\n \"1\": \"helsinki\",\n \"2\": \"tram\",\n \"3\": \"espoo\",\n \"4\": \"vantaa\",\n \"5\": \"regional_traffic\",\n \"6\": \"metro\",\n \"7\": \"ferry\",\n \"12\": \"train\",\n \"14\": \"all\",\n \"36\": \"kirkkonummi\",\n \"39\": \"kerava\",\n }\n\n # Regex for fetching reason phrase\n REASON_RE = re.compile(r\".*Syy:\\s*(?P<reason>[^\\.]*)\\.*\")\n\n # Mapping for departure directions (see pdf)\n DIRECTION_MAP = {\n \"1\": \"from_centrum\",\n \"2\": \"to_centrum\",\n }\n\n # Mapping for reasons - some typo fixes and unifying.\n REASON_MAP = {\n \"Helsinki City Marathon\": \"yleisötapahtuma\",\n \"maraton\": \"yleisötapahtuma\",\n \"sambakulkue\": \"yleisötapahtuma\",\n \"liukkaus\": \"sääolosuhteet\",\n \"tien liukkaus\": \"sääolosuhteet\",\n \"Helsinki City Run\": \"yleisötapahtuma\",\n \"Vantaa Triathlon\": \"yleisötapahtuma\",\n \"lehtikelin aiheuttama liukkaus\": \"sääolosuhteet\",\n \"keliolosuhteet\": \"sääolosuhteet\",\n \"tekninen häiriö\": \"tekninen vika\",\n \"Tietyö\": \"tietyö\",\n \"Sääolosuhteet\": \"sääolosuhteet\",\n \"kulkue\": \"yleisötapahtuma\",\n \"juoksutapahtuma\": \"yleisötapahtuma\",\n \"virtahäiriö\": \"tekninen vika\",\n \"Työnseisaus\": \"lakko\",\n \"tie poikki (viranomaisten toimesta)\": \"tie poikki\",\n \"työnseisaus\": \"lakko\",\n \"tietyömaa\": \"tietyö\",\n \"työmaa\": \"tietyö\",\n \"vaihdevika\": \"tekninen vika radassa\",\n \"kiskotyöt\": \"ratatyöt\",\n \"Este tiellä\": \"este tiellä\",\n \"sääolosuhteet, ajolangat jäätyy\": \"sääolosuhteet\",\n \"väärin pysäköidyt autot\": \"väärin pysäköity auto\",\n \"väärin pysäköity auito\": \"väärin pysäköity auto\",\n }\n\n def parse_length(self, reason, timestamp):\n \"\"\" Parses 'estimated length' from freetext field \"\"\"\n\n if \"Arvioitu kesto: \" not in reason:\n return None\n\n helsinki = pytz.timezone(\"Europe/Helsinki\")\n\n 
estimated_length = reason.split(\"Arvioitu kesto: \")[1]\n for regex in self.TIME_RE:\n match = regex.match(estimated_length)\n if not match:\n continue\n\n parsed_timestamp = datetime.datetime(1900, 1, 1)\n try:\n end_date = match.group(\"end_date\")\n for date_format in self.DATE_FORMATS:\n try:\n day_part = datetime.datetime.strptime(end_date, date_format)\n parsed_timestamp += (day_part - datetime.datetime(1900, 1, 1))\n break\n except ValueError:\n pass\n except IndexError:\n parsed_timestamp += (datetime.datetime(1900, timestamp.month, timestamp.day) - datetime.datetime(1900, 1, 1))\n\n for time_format in self.TIME_FORMATS:\n try:\n time_part = datetime.datetime.strptime(match.group(\"end_time\"), time_format)\n parsed_timestamp += (time_part - datetime.datetime(1900, 1, 1))\n parsed_timestamp += (datetime.datetime(timestamp.year, 1, 1) - datetime.datetime(1900, 1, 1))\n return helsinki.localize(parsed_timestamp)\n except ValueError:\n pass\n return None\n\n def parse_reason(self, text):\n \"\"\" Parses reason information from freetext field, if available. Returns None if no match is found. \"\"\"\n match = self.REASON_RE.match(text)\n if match:\n reason = match.group(\"reason\").encode(\"utf-8\").strip()\n reason = self.REASON_MAP.get(reason, reason)\n return reason\n return None\n\n def parse_info(self, info, timestamp):\n \"\"\" Parses info field, including length and reason \"\"\"\n text_item = None\n if isinstance(info[\"TEXT\"], list):\n for item in info[\"TEXT\"]:\n if item[\"@lang\"] == \"fi\":\n text_item = item\n break\n elif \"TEXT\" in info:\n text_item = info[\"TEXT\"]\n if text_item:\n if \"#text\" in text_item:\n data = {\n \"length\": self.parse_length(text_item[\"#text\"], timestamp),\n \"reason\": self.parse_reason(text_item[\"#text\"]),\n \"text\": text_item[\"#text\"],\n }\n return data\n return None\n\n def parse_targets(self, targets):\n \"\"\" Parses targets (affected lines) \"\"\"\n if targets is None:\n return None\n lines = []\n for k, target in targets.items():\n if k == \"LINE\":\n if not isinstance(target, list):\n target = [target]\n for line in target:\n lines.append({\"id\": line[\"@id\"], \"direction\": self.DIRECTION_MAP.get(line[\"@direction\"]), \"type\": self.LINETYPE_MAP.get(line[\"@linetype\"]), \"number\": line[\"#text\"]})\n return lines\n\n @classmethod\n def parse_isoformat(cls, time_string):\n \"\"\" Parses ISO-8601 datetimes (without timezone) to python datetime \"\"\"\n helsinki = pytz.timezone(\"Europe/Helsinki\")\n return helsinki.localize(datetime.datetime.strptime(time_string, \"%Y-%m-%dT%H:%M:%S\"))\n\n def parse_validity(self, validity):\n \"\"\" Parses notification validity timestamps and \"valid\" tag.\n\n If \"valid\" is False, notification should be hidden from the user. 
\"\"\"\n data = {\n \"valid\": validity[\"@status\"] == \"1\",\n \"from\": self.parse_isoformat(validity[\"@from\"]),\n \"to\": self.parse_isoformat(validity[\"@to\"]),\n }\n return data\n\n def parse_item(self, item, timestamp):\n \"\"\" Parses a single deserialized item \"\"\"\n data = {\n \"id\": item[\"@id\"],\n \"type\": self.TYPE_MAP[item[\"@type\"]],\n \"source\": self.SOURCE_MAP[item[\"@source\"]],\n \"info\": self.parse_info(item[\"INFO\"], timestamp),\n \"lines\": self.parse_targets(item[\"TARGETS\"]),\n \"validity\": self.parse_validity(item[\"VALIDITY\"]),\n }\n return data\n\n def parse(self, content, timestamp):\n \"\"\" Parses XML from poikkeusinfo.fi \"\"\"\n parsed = xmltodict.parse(content)\n if \"DISRUPTIONS\" not in parsed:\n return\n parsed = parsed[\"DISRUPTIONS\"]\n items = []\n if \"DISRUPTION\" in parsed:\n disruptions = parsed[\"DISRUPTION\"]\n if not isinstance(disruptions, list):\n disruptions = [disruptions]\n for item in disruptions:\n items.append(self.parse_item(item, timestamp))\n return items\n\n\nclass PoikkeusInfoFilter(object):\n \"\"\" Filters entries based on configuration dictionary. \"\"\"\n\n def __init__(self, config):\n self.config = config\n\n def filter_item(self, item):\n \"\"\" Checks whether a single item should be included. Returns either None or item \"\"\"\n\n if not item[\"validity\"][\"valid\"] or item[\"lines\"] is None:\n return None\n for line_name, config in self.config.items():\n for filter_by in item[\"lines\"]:\n if \"line_type\" in config and filter_by[\"type\"] != config[\"line_type\"]:\n continue\n if \"directions\" in config and filter_by[\"direction\"] not in config[\"directions\"]:\n continue\n if \"numbers\" in config and filter_by[\"number\"] not in config[\"numbers\"]:\n continue\n item[\"display_name\"] = line_name\n return item\n\n def filter(self, lines):\n \"\"\" Filters a list of items. \"\"\"\n\n filtered_lines = []\n for line in lines:\n filtered = self.filter_item(line)\n if filtered:\n filtered_lines.append(filtered)\n return filtered_lines\n\n\nclass PoikkeusInfoRunner(object):\n \"\"\" Fetches XML from poikkeusinfo.fi, parse, filters and publishes to redis.\n\n Fetch interval is configured by FETCH_INTERVAL variable.\n \"\"\"\n\n def __init__(self):\n self.pip = PoikkeusInfoParser()\n self.pif = PoikkeusInfoFilter(LINES)\n self.redis_instance = redis.StrictRedis()\n self.last_run_at = None\n self.logger = logging.getLogger(\"poikkeusinfo-runner\")\n self.logger.setLevel(logging.INFO)\n format_string = \"%(asctime)s - %(levelname)s - %(message)s\"\n formatter = logging.Formatter(format_string)\n ch = logging.StreamHandler()\n ch.setFormatter(formatter)\n self.logger.addHandler(ch)\n\n def fetch(self):\n \"\"\" A single fetch. Returns False on failure. Saves and publishes updates to redis. \"\"\"\n\n resp = requests.get(\"http://www.poikkeusinfo.fi/xml/v2/fi\")\n if resp.status_code != 200:\n self.logger.info(\"Fetching failed with status code %s\", resp.status_code)\n return False\n parsed = self.pip.parse(resp.content, datetime.datetime.now())\n filtered = self.pif.filter(parsed)\n dumped = json.dumps(filtered, cls=DateTimeEncoder)\n self.redis_instance.setex(\"hsl-poikkeusinfo\", 3600, dumped)\n self.redis_instance.publish(\"home:broadcast:generic\", json.dumps({\"key\": \"poikkeusinfo\", \"content\": filtered}, cls=DateTimeEncoder))\n return filtered\n\n def run(self):\n \"\"\" Runner for periodic fetching and publishing. Configure interval with FETCH_INTERVAL variable. 
\"\"\"\n self.last_run_at = time.time()\n while True:\n self.logger.info(\"Starting\")\n self.fetch()\n sleep_time = max(FETCH_INTERVAL / 2, FETCH_INTERVAL - (time.time() - self.last_run_at))\n self.logger.info(\"Sleeping %ss\", sleep_time)\n time.sleep(sleep_time)\n\n\ndef main_testing():\n \"\"\" Runs all .xml files and prints the results \"\"\"\n pip = PoikkeusInfoParser()\n pif = PoikkeusInfoFilter(LINES)\n for filename in glob.glob(\"*.xml\"):\n timestamp = datetime.datetime.now()\n content = open(filename).read()\n parsed = pip.parse(content, timestamp)\n filtered = pif.filter(parsed)\n if len(filtered) > 0:\n pprint.pprint(filtered)\n\n\ndef main_run():\n \"\"\" Starts periodic download/parse/filter/publish cycle in foreground \"\"\"\n pir = PoikkeusInfoRunner()\n pir.run()\n\nif __name__ == '__main__':\n main_run()\n" } ]
4
steffenheyne/UCSC_trackHub_generator
https://github.com/steffenheyne/UCSC_trackHub_generator
81d6fda75dc24067a3ebc0f65d054e14b6f872cd
e275d5d6966c61eb73795db94b3f200683c4780f
7704f547d9608fb94da5df4f539b99d6b3f7cd10
refs/heads/master
2021-06-14T06:29:18.941698
2021-05-25T20:03:04
2021-05-25T20:03:04
197,746,206
0
2
null
null
null
null
null
[ { "alpha_fraction": 0.696177065372467, "alphanum_fraction": 0.7082495093345642, "avg_line_length": 32.13333511352539, "blob_id": "44653db4d910a2261a7a2b60c010caa655e4b030", "content_id": "0bcee92935601a2971f536119a7b320b7da374f3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 994, "license_type": "permissive", "max_line_length": 89, "num_lines": 30, "path": "/README.md", "repo_name": "steffenheyne/UCSC_trackHub_generator", "src_encoding": "UTF-8", "text": "# UCSC trackHub generator\n\npython script to creates a UCSC trackhub configuration from local directory structure\n\nIt parses a local directory structure and maps this to a UCSC trackDb.txt config\n\nAuthor: Steffen Heyne, MPI-IE Freiburg, Germany\n\n## usage example\n\n cd example/\n trackHub_generator.py -o mm10_upload mm10/\n\nParses mm10/ directory and writes tracksDb.txt to mm10_upload/ \nIt also links (symbolic) all used files to mm10_upload/ for easy cloud upload afterwards.\n\nRecognized subdirs (in the example under mm10/)\n \n Allowed directory names: *.multiwig\n *.composite\n *.super\n \n Only one nesting level is supported by UCSC! So only super containers can hold one \n level of multiwig or composite containers, not more! \n\n composite/ multiwig containers can only contain tracks.\n \n The toplevel can contain tracks (not related to any container)\n \n Current code only supports *.bw|*.bigwig or *.bb|*.bigbed tracks!\n" }, { "alpha_fraction": 0.5212580561637878, "alphanum_fraction": 0.5489805936813354, "avg_line_length": 33.97391128540039, "blob_id": "ed08c7528f65e06a2dbfff7790083522c95f33f0", "content_id": "ea7d991d4ca90ee650c67cddda075d41745572b4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16088, "license_type": "permissive", "max_line_length": 275, "num_lines": 460, "path": "/trackHub_generator.py", "repo_name": "steffenheyne/UCSC_trackHub_generator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n## Author: Steffen Heyne, MPI-IE Freiburg, Germany\n## script that parses a local directory structure and maps this to a UCSC trackDB.txt config\n## https://github.com/steffenheyne/UCSC_trackHub_generator\n\n## UCSC docu\n## https://genome.ucsc.edu/goldenPath/help/trackDb/trackDbHub.html\n\nimport argparse\nimport os.path\nimport glob\nimport pprint\nimport functools\nimport re\nimport yaml\nimport sys\n\nargs = None\ntrackCounter = 1\n\npp = pprint.PrettyPrinter()\n\n## bigwig colors\n## keys are used as regex pattern against filename/ 'track' key in dict\nbigwig_colors = {\n \"CD24.*H3K27ac\": \"252,78,42\",\n \"CD24.*H3K27me3\": \"140,107,177\",\n \"input\": \"150,150,150\",\n \"H3K4me1\": \"65,171,93\",\n \"H3K4me2\": \"161,217,155\",\n \"H3K27ac\": \"252,78,42\",\n \"H3K4me3\": \"203,24,29\",\n \"H3K36me3\": \"254,196,79\",\n \"H3K27me3\": \"140,107,177\",\n \"H3K27me2\": \"147,123,173\",\n \"H2AK119Ub\":\"184,151,191\",\n \"H3K27me1\": \"230,179,99\",\n \"H3K9me3\": \"29,145,192\",\n \"H3K9me2\": \"51,51,255\",\n \"H3K9ac\": \"164,0,0\", # 252,146,114\",\n \"CTCF\": \"106,81,163\",\n \"WGBS\": \"0,102,255\",\n \"methyl\": \"0,102,255\",\n \"RNA.*fwd\": \"0,102,0\",\n \"RNA.*rev\": \"153,51,0\",\n \"RNA.*RPKM\":\"71,107,107\",\n \"RNA\": \"71,107,107\",\n \"DNase\": \"0,204,102\",\n \"Hp1a\": \"0,128,255\",\n \"H1\": \"255,102,255\",\n \"Rpb1\": \"173,68,2\"\n }\n\n\nmultiwig_default = {\"track\": None,\n \"type\": \"bigWig\",\n \"container\": \"multiWig\",\n 
\"parent\": None,\n \"shortLabel\":None,\n \"longLabel\": None,\n \"aggregate\": \"transparentOverlay\",\n \"showSubtrackColorOnUi\": \"on\",\n \"priority\": 1,\n \"html\": \"examplePage\"\n}\n\n\nbigwig_default = {\"track\": None,\n \"type\": \"bigWig\",\n \"parent\": None,\n \"bigDataUrl\": None,\n \"shortLabel\":None,\n \"longLabel\": None,\n \"color\": \"255,0,0\"\n }\n\n## bigwig configuration that can be part of multiwig container or \n## individual bigwig track (if track is not in multiwig container) \nbigwig_combined = {\"visibility\": \"hide\",\n \"maxHeightPixels\": \"500:20:8\",\n \"viewLimits\": \"0:20\",\n \"alwaysZero\": \"on\",\n \"autoScale\": \"off\",\n \"windowingFunction\": \"mean+whiskers\",\n \"priority\": 1\n }\n\nbigbed_default = {\n \"track\": None,\n \"parent\": None,\n \"bigDataUrl\": None,\n \"shortLabel\":None,\n \"longLabel\": None,\n \"type\": \"bigBed 3 +\",\n \"itemRgb\": \"on\",\n \"color\": \"255,0,0\",\n \"visibility\": \"squish\",\n \"maxItems\": \"100000\",\n \"maxWindowToDraw\": \"20000000\"\n #\"colorByStrand\": \"255,0,0 0,0,255\"\n }\n\n## specific bigwig configurations\n## either for multiwig container or individual bigwig tracks\n## keys are used as regex pattern against filename/ 'track' key in dict\n## more specific patterns should come first in dict as we break loop after first match\n## if matching, then the specifc track values are added or overwritten\nbigwig_specific = {\n \"EpM93_ND|EpM95_ND\": {\n \"viewLimits\": \"0:10\"},\n \"CD24.*H3K27ac\": {\n \"viewLimits\": \"0:30\"},\n \"CD24.*H3K27me3\": {\n \"viewLimits\": \"0:20\"}, \n \"methyl|WGBS\": {\n \"viewLimits\": \"0:100\",\n \"maxHeightPixels\": \"500:30:8\"},\n \"male.*H3K9me3\": {\n \"viewLimits\": \"0:15\"},\n \"H3K9me3\": {\n \"viewLimits\": \"0:8\"},\n \"H3K27me3\": {\n \"viewLimits\": \"0:14\"},\n \"H3K27me2\": {\n \"viewLimits\": \"0:8\"},\n \"H3K27me1\": {\n \"viewLimits\": \"0:8\"},\n \"H2AK119Ub\": {\n \"viewLimits\": \"0:8\"},\n \"snRNA\": {\n \"viewLimits\": \"0:15\",\n \"maxHeightPixels\": \"500:30:8\",\n \"transformFunc\": \"LOG\"},\n \"RNA\": {\n \"viewLimits\": \"0:30\"},\n }\n\n\nbigbed_specific = {\n \"N25_segmentation.paper_colors\": {\n \"type\": \"bigBed 9 +\",\n \"visibility\": \"dense\"},\n \"Roadmap_6marks\": {\n \"type\": \"bigBed 9 +\",\n \"visibility\": \"dense\"},\n }\n\ncomposite_default = {\"track\": None,\n \"parent\": None,\n \"type\": None,\n \"compositeTrack\": \"on\",\n \"shortLabel\": None,\n \"longLabel\": None,\n \"visibility\": \"hide\",\n \"priority\": 1,\n \"centerLabelsDense\": \"on\",\n \"html\": \"examplePage\"\n} \n\n\nsuper_default = {\"track\": None,\n \"superTrack\": \"on\",\n \"parent\": None,\n \"shortLabel\": None,\n \"longLabel\": None,\n \"priority\": 1,\n \"html\": \"examplePage\"\n}\n\n## adapted from http://code.activestate.com/recipes/577879-create-a-nested-dictionary-from-oswalk/\ndef get_directory_structure(rootdir):\n \"\"\"\n Creates a nested dictionary that represents the folder structure of rootdir\n \"\"\"\n rootdir = rootdir.rstrip(os.sep)\n start = rootdir.rfind(os.sep) + 1\n dir= {\"containers\": [rootdir]} \n for path, dirs, files in os.walk(rootdir):\n folders = path[start:].split(os.sep)\n\n subdir = dict.fromkeys(files)\n parent = functools.reduce(dict.get, folders[:-1], dir)\n \n config = get_container_config(path, folders, subdir)\n \n parent[folders[-1]] = {'containers': dirs}\n parent[folders[-1]].update(config)\n \n return dir\n\n\ndef get_container_config(path, parents, files):\n \"\"\"\n Creates a trackhub 
container and tracks config based on current \n directory name and file content\n \n Allowed dir names: *.multiwig\n *.composite\n *.super\n \n Only one nesting level is supported by UCSC! So only super containers can hold one \n level of multiwig or composite containers, not more! \n Composite/multiwig containers can only contain tracks.\n \n The toplevel can contain tracks (not related to any container)\n \n Current code only supports *.bw|*.bigwig or *.bb|*.bigbed tracks!\n \"\"\" \n config = { 'tracks': {} }\n container_config = {} \n generatorType = None\n \n if re.match(\".*\\.multiwig$\",parents[-1],re.IGNORECASE):\n container_config.update(multiwig_default)\n container_config.update(bigwig_combined)\n generatorType = \"multiwig\"\n elif re.match(\".*\\.composite$\",parents[-1],re.IGNORECASE):\n container_config.update(composite_default)\n generatorType = \"composite\"\n elif re.match(\".*\\.super$\",parents[-1],re.IGNORECASE):\n container_config.update(super_default)\n generatorType = \"super\"\n elif len(parents)>1: \n sys.exit(\"Every subdir needs to be a multiwig, composite or super container!\")\n \n container_config[\"track\"] = parents[-1]\n container_config[\"shortLabel\"] = parents[-1]\n container_config[\"longLabel\"] = parents[-1]\n \n container_config[\"parent\"] = parents[len(parents)-2]\n \n if generatorType == \"multiwig\":\n for pat in bigwig_specific:\n if re.match(\".*(\"+pat+\")\",container_config[\"track\"],re.IGNORECASE):\n print(\" \".join([\"match \",container_config[\"track\"],\" \",pat]))\n container_config.update(bigwig_specific[pat])\n break\n \n ## toplevel must not have a parent entry\n if len(parents)-2 <= 0:\n container_config.pop('parent',None)\n \n config['tracks'][parents[-1]] = container_config\n \n ## get per track config\n tracks = get_tracks_config(files, generatorType, parents)\n config['tracks'].update(tracks)\n \n ## set type for specific containers \n if generatorType == \"composite\" or generatorType == \"multiwig\":\n tmp = set([v for k in tracks.keys() for kk,v in tracks[k].items() if kk == 'type'])\n multi_track_type = 'bigWig'\n if len(tmp)>0:\n multi_track_type = tmp.pop()\n if len(tmp) > 0:\n sys.exit(\"Only one tracktype allowed in composite or multiwig containers!\")\n if generatorType == \"multiwig\" and multi_track_type != \"bigWig\":\n sys.exit(\"Only bigWig tracks are allowed in multiwig containers!\")\n \n config['tracks'][parents[-1]]['type'] = multi_track_type\n \n ## update configs from config files if found (first *.yaml) in current path\n config = update_config_from_file(path, config)\n\n ## just dump config of current container into its directory\n ## file can be used as starting point to modify/add specific options\n with open(os.path.join(path,\"container_config.used\"), 'w') as f:\n yaml.dump(config['tracks'], f, default_flow_style=False)\n \n return config\n\n\n## configure tracks for current container\ndef get_tracks_config(files, type, parents):\n \"\"\"\n Creates a config per track from 'files'\n 'type' is current container type\n 'parents' is used to get the path and right parent name etc \n \n Current code only supports *.bw|*.bigwig or *.bb|*.bigbed tracks!\n \n \"\"\"\n tracks_config = {}\n global trackCounter\n \n for track_file in files:\n track_config = {}\n ## we have a bigwig file\n if re.match(\".*\\.(bw|bigwig)$\",track_file,re.IGNORECASE):\n track_config.update(bigwig_default)\n if type != \"multiwig\":\n track_config.update(bigwig_combined)\n\n ## toplevel tracks have no parent entry\n if 
len(parents)-2 > -1:\n track_config[\"parent\"] = parents[-1]\n else:\n track_config.pop('parent',None)\n \n track_config[\"track\"] = \"_\".join([\"track\",str(trackCounter)])\n track_config[\"bigDataUrl\"] = os.path.join(*parents[1:]+[track_file])\n track_config[\"shortLabel\"] = track_file\n track_config[\"longLabel\"] = track_file\n track_config[\"color\"] = get_bigwig_color(track_file,parents[-1])\n trackCounter += 1\n \n if type != \"multiwig\":\n for pat in bigwig_specific:\n if re.match(\".*(\"+pat+\")\",track_file,re.IGNORECASE):\n print(\" \".join([\"match \",track_file,\" \",pat]))\n track_config.update(bigwig_specific[pat])\n break\n \n tracks_config[track_file] = track_config\n ## we have a bigbed file\n elif re.match(\".*\\.(bb|bigbed)$\",track_file,re.IGNORECASE):\n track_config.update(bigbed_default)\n if len(parents)-2 > -1:\n track_config[\"parent\"] = parents[-1]\n else:\n track_config.pop('parent',None)\n \n track_config[\"track\"] = \"_\".join([\"track\",str(trackCounter)])\n track_config[\"bigDataUrl\"] = os.path.join(*parents[1:]+[track_file])\n track_config[\"shortLabel\"] = track_file\n track_config[\"longLabel\"] = track_file\n track_config[\"color\"] = get_bigwig_color(track_file,parents[-1],bigbed_default['color'])\n trackCounter += 1\n \n for pat in bigbed_specific:\n if re.match(\".*(\"+pat+\")\",track_file,re.IGNORECASE):\n print(\" \".join([\"match \",track_file,\" \",pat]))\n track_config.update(bigbed_specific[pat])\n break\n\n tracks_config[track_file] = track_config\n \n return tracks_config\n\n\ndef get_bigwig_color(filename, parent, default=\"255,0,0\"):\n #print([filename,parent])\n for pattern,color in bigwig_colors.items():\n if (re.search(pattern, filename, re.IGNORECASE) or re.search(pattern, parent, re.IGNORECASE)):\n #print([\"match\",pattern])\n return color\n return default\n\n\ndef update_config_from_file(path, config):\n \n ## take first yaml file that is found in path\n config_files = glob.glob(os.path.join(path,\"*.yaml\"))\n if config_files:\n config_file = config_files[0]\n else:\n return config \n \n configFromFile = {}\n if os.path.isfile(config_file):\n with open(config_file, \"r\") as f:\n configFromFile = yaml.load(f)\n \n for tr in config['tracks']:\n if tr in configFromFile:\n config['tracks'][tr].update(configFromFile[tr])\n \n return config\n\n\ndef write_hub(file, hub, depth, in_root, outdir):\n \n ## write out container config section\n for container in hub['containers']:\n \n if depth>0:\n for k,v in hub[container]['tracks'][container].items():\n file.write(\"{m: <{de}}\".format(m='',de=str((depth-1)*5))) \n file.write(\"{} {}\\n\".format(k,v))\n file.write(\"\\n\")\n \n write_hub(file, hub[container], depth+1, in_root, outdir)\n \n ## write out all 'child' tracks of container\n for track in hub[container]['tracks']:\n if container != track:\n for k,v in hub[container]['tracks'][track].items():\n if k=='bigDataUrl':\n v = hub[container]['tracks'][track][k].split(os.sep)[-1]\n ## indentation\n file.write(\"{m: <{de}}\".format(m='',de=str(depth*5)))\n file.write(\"{} {}\\n\".format(k,v))\n ## remove link if we have on old link with same name\n if os.path.islink(os.path.join(os.path.abspath(outdir),track)):\n os.remove(os.path.join(outdir,track))\n ## link track into output dir\n os.symlink(os.path.relpath(os.path.join(in_root,hub[container]['tracks'][track]['bigDataUrl']), outdir), os.path.join(outdir,track))\n file.write(\"\\n\")\n\ndef main():\n\n global trackCounter\n\n parser = argparse.ArgumentParser() \n \n 
parser.add_argument(\"indir\",\n help=\"input directory\")\n\n parser.add_argument(\"-o\", \"--outputDir\",\n dest=\"outdir\",\n required=True,\n help=\"output directory\")\n \n parser.add_argument(\"-t\", \"--trackDbFilename\",\n dest=\"trackDbFilename\",\n default=\"trackDb.txt\",\n help=\"filename of trackhub config, useful if you use multiple trackDb files \"\n \"for one organism, see also --postContent! (default: '%(default)s')\")\n \n \n parser.add_argument(\"-i\", \"--startIndex\",\n dest=\"startIndex\",\n default=1,\n type=int,\n help=\"numerical index for first track, important if multiple trackDb files are used (default: '%(default)s')\")\n \n parser.add_argument(\"-p\", \"--postContent\",\n dest=\"postContent\",\n default='',\n help=\"string/text that is inserted at the end of generated trackDb, use eg. 'include trackDb.test.txt' to include an additional track config file; Note: you likely need to specify -t -i when you generate 'trackDb.test.txt' ! (default: '%(default)s')\")\n \n args = parser.parse_args()\n \n if args.trackDbFilename == '':\n sys.exit(\"Please provide a filename for -t\") \n \n print(args.outdir)\n print(os.path.abspath(args.indir))\n \n trackCounter = args.startIndex\n \n ## get the hub by parsing directory structure\n hub = get_directory_structure(args.indir)\n\n os.makedirs(args.outdir,exist_ok = True)\n \n ## write hub config to output dir and link all files for upload into it\n with open(os.path.join(args.outdir,args.trackDbFilename), 'w') as f:\n write_hub(f,hub,0, args.indir,args.outdir)\n f.write(args.postContent)\n f.close()\n \n ## just dump hub as yaml file for inspection/debugging\n with open(os.path.join(args.outdir,args.trackDbFilename+\".hub_dict.yaml\"), 'w') as f:\n yaml.dump(hub, f, default_flow_style=False)\n \n\nif __name__ == \"__main__\":\n main()\n" } ]
2
Learsim/LearsimControlPanel
https://github.com/Learsim/LearsimControlPanel
ea718611f1eb0cabe745d3dcafe384e81384f49b
80ae68d7e21caf768582fabcb81862720b1a63ba
1dc81c1e6287dd9f0ed3741ec98d60544debb782
refs/heads/main
2023-06-18T04:38:21.907080
2021-07-16T12:23:57
2021-07-16T12:23:57
352,812,556
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.807947039604187, "alphanum_fraction": 0.807947039604187, "avg_line_length": 36.75, "blob_id": "b450bcca1a8a3a271dd3a6c981c9db1d350ecb2b", "content_id": "9a52d80620c24c3a70d4ddf5d3f46feb81bde898", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 151, "license_type": "permissive", "max_line_length": 62, "num_lines": 4, "path": "/src/Arduino/Output/NeoPixelStrip.ts", "repo_name": "Learsim/LearsimControlPanel", "src_encoding": "UTF-8", "text": "import ArduinoComponent from '../ArduinoComponent';\nimport NeoPixel from './NeoPixel';\n\nexport default class NeoPixelStrip extends ArduinoComponent {}\n" }, { "alpha_fraction": 0.6944444179534912, "alphanum_fraction": 0.6944444179534912, "avg_line_length": 30.5, "blob_id": "f5fdeb35a50d86ff2b909f42fd14c8027e1eff41", "content_id": "005eb014b82c20dbaa44676a5fe874a3b52484c3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 252, "license_type": "permissive", "max_line_length": 75, "num_lines": 8, "path": "/src/API/SimVariables.ts", "repo_name": "Learsim/LearsimControlPanel", "src_encoding": "UTF-8", "text": "import axios from 'axios';\nimport { enumEndpoint } from './API.Common';\n\nexport default function getSimVars(hostname: string): Promise<string[]> {\n return axios.get<string[]>(hostname.concat(enumEndpoint)).then((res) => {\n return res.data;\n });\n}\n" }, { "alpha_fraction": 0.8181818127632141, "alphanum_fraction": 0.8181818127632141, "avg_line_length": 35.66666793823242, "blob_id": "7044e0cc548bd983b8bc4f7cccee3e7a76e3377e", "content_id": "da0e203376b082d003aeeedae468f933f7effb3b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 110, "license_type": "permissive", "max_line_length": 56, "num_lines": 3, "path": "/src/Arduino/Output/Display.ts", "repo_name": "Learsim/LearsimControlPanel", "src_encoding": "UTF-8", "text": "import ArduinoComponent from '../ArduinoComponent';\n\nexport default class Display extends ArduinoComponent {}\n" }, { "alpha_fraction": 0.6432461738586426, "alphanum_fraction": 0.7559912800788879, "avg_line_length": 27.230770111083984, "blob_id": "d7ca356263d44fe5d7549e63a2757448df7634be", "content_id": "1bfb453279170a766357a1f72e6ce94ee1d6c24a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1836, "license_type": "permissive", "max_line_length": 169, "num_lines": 65, "path": "/README.md", "repo_name": "Learsim/LearsimControlPanel", "src_encoding": "UTF-8", "text": "\n<div align=\"center\">\n\n![logo](assets/iconsmall.png)\n\n# Learsim Control Panel\n</div>\n\n## Purpose\nLearsim Control Panel for managing mostly Arduino clients for FS2020. \nUsing [Learsim Backend](https://github.com/Learsim/LearsimSimulatorBackend).\n\n## Usage\n### Map\nThe map needs a [Mapbox access token](https://account.mapbox.com/access-tokens/) to function. The \"Default public token\" works great. 
This is configured in the settings.\n\n### Connection to server\nThe defualt settings for the server is port 8888 then the IP or hostname to the machine running the backend.\n\n\n## Development \n\n#### Clone repo\n```bash\ngit clone https://github.com/Learsim/ControlPanel.git\n```\n#### Go to directory\n```bash\ncd ControlPanel\n```\n#### Run yarn\n```bash\nyarn\n```\n#### Run yarn start\n```bash\nyarn start\n```\n### Using the mock API\nThe mock API is right now deprecated. The old one was written in python, this will maybe rewritten soon for easier development in that case\n#### Running th emoch API\n```bash\ncd src\\mockapi\n```\nIf first time\n```bash\npython3 -m venv .\n```\nThen\n```bash\n.\\Scripts\\activate\npip install -r requirements.txt\npython mockAPI.py\n```\n\n## Screenshots\n![image](https://user-images.githubusercontent.com/54435884/125946731-d469f18f-5c27-4928-91f6-71ec0ebdd33b.png)\n![image](https://user-images.githubusercontent.com/54435884/125946760-9902d96a-6ad3-445b-bb60-99e67d02a416.png)\n![image](https://user-images.githubusercontent.com/54435884/125946780-73e835d5-4f31-40ec-a684-c078cb92ec5a.png)\n![image](https://user-images.githubusercontent.com/54435884/125946807-2e9d7870-d906-4483-ae59-86e1226969a7.png)\n![image](https://user-images.githubusercontent.com/54435884/125946845-69313400-759b-4313-bf9d-3843ec821668.png)\n\n\n## Credits\n\n#### [Electron React Boilerplate](https://github.com/electron-react-boilerplate/electron-react-boilerplate)\n" }, { "alpha_fraction": 0.7156862616539001, "alphanum_fraction": 0.7156862616539001, "avg_line_length": 24.5, "blob_id": "030b3772e8f973fae758d20b28951dd1ca9e60b3", "content_id": "297fac6cd8f3baf62875c9336068b4555a305a99", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 306, "license_type": "permissive", "max_line_length": 75, "num_lines": 12, "path": "/src/API/Status.ts", "repo_name": "Learsim/LearsimControlPanel", "src_encoding": "UTF-8", "text": "import axios from 'axios';\nimport { statusEndpoint } from './API.Common';\n\nexport interface Status {\n SimConnection: boolean;\n}\n\nexport default function getStatus(hostname: string): Promise<Status> {\n return axios.get<Status>(hostname.concat(statusEndpoint)).then((res) => {\n return res.data;\n });\n}\n" }, { "alpha_fraction": 0.704623281955719, "alphanum_fraction": 0.704623281955719, "avg_line_length": 20.629629135131836, "blob_id": "c44f078fea5e4b65e2c9f5d30cef125b0bd7a34f", "content_id": "912cbb2ffc6850a74b4af4765c0c06b0427eff31", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1168, "license_type": "permissive", "max_line_length": 78, "num_lines": 54, "path": "/src/API/Clients.ts", "repo_name": "Learsim/LearsimControlPanel", "src_encoding": "UTF-8", "text": "import axios from 'axios';\nimport Arduino from '../Arduino/Arduino.Common';\nimport { clientEndpoint, clientsEndpoint } from './API.Common';\n\nexport interface Client {\n IsOpen: boolean;\n Config: Config;\n}\nexport interface Config {\n ConnectionType: ConnectionType;\n guid: string;\n Name: string;\n Adress: string;\n StaticPort: boolean;\n Baud: number;\n Description: string;\n Port: number;\n bindings: Binding[];\n}\nexport interface Binding {\n ValueName: string;\n Type: number;\n Input: boolean;\n SimVar: SimVar;\n UpdateRate: number;\n}\nexport interface SimVar {\n Identfier: string;\n Index: number;\n}\n\nexport default function getClients(hostname: string): 
Promise<Client[]> {\n return axios.get<Client[]>(hostname.concat(clientsEndpoint)).then((res) => {\n return res.data;\n });\n}\nexport function AddClient(hostname: string, arduino: Arduino) {\n console.log(JSON.stringify(arduino));\n return axios\n .post(hostname.concat(clientEndpoint), JSON.stringify(arduino))\n .then((res) => {\n return res.data;\n });\n}\nexport enum ConnectionType {\n SERIAL,\n TCP,\n UDP,\n}\nexport enum ConnectionState {\n Disconnected,\n Connected,\n ConnectionError,\n}\n" }, { "alpha_fraction": 0.7129455804824829, "alphanum_fraction": 0.7129455804824829, "avg_line_length": 21.20833396911621, "blob_id": "d0ac71bc41bf201bc1b0f4d138e79da63074de97", "content_id": "23b96d5975a0ffcdc99664fe6e0bbdd1d2aac585", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 533, "license_type": "permissive", "max_line_length": 76, "num_lines": 24, "path": "/src/API/SimVarValues.ts", "repo_name": "Learsim/LearsimControlPanel", "src_encoding": "UTF-8", "text": "import axios from 'axios';\nimport { valuesEndpoint } from './API.Common';\n\nexport interface Values {\n SimVars: SimVarValue[];\n LearVars: LearVar[];\n}\nexport interface SimVarValue {\n Key: SimVar;\n Value: string;\n}\nexport interface SimVar {\n Identfier: string;\n Index: number;\n}\nexport interface LearVar {\n Identifier: string;\n Value: string;\n}\nexport default function getSimVarValues(hostname: string): Promise<Values> {\n return axios.get<Values>(hostname.concat(valuesEndpoint)).then((res) => {\n return res.data;\n });\n}\n" }, { "alpha_fraction": 0.6572580933570862, "alphanum_fraction": 0.6693548560142517, "avg_line_length": 18.076923370361328, "blob_id": "6e8698394923056e35604268f9aaf922752cf570", "content_id": "a9e66749c96dd9d74950ec4ce448a5de788b5297", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 248, "license_type": "permissive", "max_line_length": 50, "num_lines": 13, "path": "/src/Arduino/Arduino.Common.ts", "repo_name": "Learsim/LearsimControlPanel", "src_encoding": "UTF-8", "text": "import { v4 as uuidv4 } from 'uuid';\nimport ArduinoComponent from './ArduinoComponent';\n\nexport default class Arduino {\n UUID: string;\n\n Components: ArduinoComponent[];\n\n constructor() {\n this.UUID = uuidv4();\n this.Components = [];\n }\n}\n" }, { "alpha_fraction": 0.6797945499420166, "alphanum_fraction": 0.6969178318977356, "avg_line_length": 44, "blob_id": "2600524d299bc739eb87c4b370ef37431a94965d", "content_id": "20a09c6b5954590be9ca1d5ba9f8732e716b9826", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 584, "license_type": "permissive", "max_line_length": 257, "num_lines": 13, "path": "/src/ExtractSimVars.py", "repo_name": "Learsim/LearsimControlPanel", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup\nimport json\nsimvarFile = open(\"./simvars.html\")\nsimVarText = simvarFile.read()\nbsoup = BeautifulSoup(simVarText,features=\"html.parser\")\nsimVars = bsoup.find_all(\"tr\")\nsimvarsobject = []\nfor i in range(1,len(simVars)):\n simvarsobject.append({\"simvar\":str(simVars[i].find_all(\"td\")[0].find(\"code\").contents[0]),\"desc\":str(simVars[i].find_all(\"td\")[1].contents[0]),\"units\" : str(simVars[i].find_all(\"td\")[2].contents[0]),\"type\":str(simVars[i].find_all(\"td\")[3].contents[0])})\n\nf = open(\"simvarsout.json\", \"a\")\nf.write(json.dumps(simvarsobject))\nf.close()" }, { 
"alpha_fraction": 0.6816608905792236, "alphanum_fraction": 0.6816608905792236, "avg_line_length": 31.11111068725586, "blob_id": "0741f792e930b62315a81fffe7e05231a8c330a1", "content_id": "cb6e488915db12981ded98abb6518035686c99a5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 289, "license_type": "permissive", "max_line_length": 74, "num_lines": 9, "path": "/src/API/Nodes.ts", "repo_name": "Learsim/LearsimControlPanel", "src_encoding": "UTF-8", "text": "import axios from \"axios\";\nimport { nodesEndpoint } from \"./API.Common\";\nimport { Node } from \"../Helpers/Nodes\";\n\nexport default function getNodes(hostname: string): Promise<Node[]> {\n return axios.get<Node[]>(hostname.concat(nodesEndpoint)).then((res) => {\n return res.data;\n });\n}\n" }, { "alpha_fraction": 0.6158536672592163, "alphanum_fraction": 0.6158536672592163, "avg_line_length": 15.399999618530273, "blob_id": "1ee7066fcc712b16f4d8000ecd72a3c315614e4f", "content_id": "6086033f2fdb0a186da282273b7d0a30bec84dc0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 164, "license_type": "permissive", "max_line_length": 45, "num_lines": 10, "path": "/src/Arduino/Output/NeoPixel.ts", "repo_name": "Learsim/LearsimControlPanel", "src_encoding": "UTF-8", "text": "export default class NeoPixel {\n Pos: number;\n\n Color: number[];\n\n constructor(Color: number[], Pos: number) {\n this.Color = Color;\n this.Pos = Pos;\n }\n}\n" }, { "alpha_fraction": 0.593961238861084, "alphanum_fraction": 0.6002703905105591, "avg_line_length": 26.737499237060547, "blob_id": "d3c7442f357c01a5e2df514f79d2b3b62886a96d", "content_id": "fcc77b68e572b59995b3194ca41eaf351e6254ab", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 2219, "license_type": "permissive", "max_line_length": 71, "num_lines": 80, "path": "/src/API/ArduinoGenerator.ts", "repo_name": "Learsim/LearsimControlPanel", "src_encoding": "UTF-8", "text": "/* eslint-disable prefer-const */\nimport Arduino from '../Arduino/Arduino.Common';\nimport { ComponentType } from '../Arduino/ArduinoComponent';\nimport EightSegDisplay from '../Arduino/Output/8SegDisplay';\nimport Display from '../Arduino/Output/Display';\nimport Led from '../Arduino/Output/Led';\nimport NeoPixelStrip from '../Arduino/Output/NeoPixelStrip';\n\nconst BaseCode = `{CR}#include \"Learsim.h\"\n#include <ArduinoJson.h>\n{INCLUDES}\n{INIT}\nMessangeHandler msghndlr;\nvoid setup()\n{{SETUP}\n msghndlr.Init(2048);\n Serial.begin(9600);\n}\nvoid loop()\n{{LOOP}if (Serial.available())\n {\n if (msghndlr.DeserializeJson())\n {{DESJSON}}\n }\n}\n{CUSTOMFUNCS}\n`;\nexport default function GenerateArduinoCode(arduino: Arduino): string {\n let Leds: Led[] = [];\n let Displays: Display[] = [];\n let NeoPixels: NeoPixelStrip[] = [];\n let EightSegDisplays: EightSegDisplay[] = [];\n let Init = '';\n let Setup = '';\n let CopyRight = '';\n let CustomFuncs = '';\n let Loop = '';\n let Includes = '';\n let DesJson = '';\n Init += `string id = \"${arduino.UUID}\";`;\n arduino.Components.forEach((element) => {\n switch (element.Type) {\n case ComponentType.Display:\n Displays.push(element);\n break;\n case ComponentType.Led:\n Leds.push(element);\n break;\n case ComponentType.NeoPixelStrip:\n NeoPixels.push(element);\n break;\n case ComponentType.EightSegDisplay:\n EightSegDisplays.push(element);\n break;\n default:\n break;\n }\n });\n if 
(Leds.length > 1) {\n Init += `\\nLed leds[${Leds.length}]\\n`;\n Setup += `\\n ${Leds.map(\n (led, index) =>\n `\\n leds[${index}] = new Led(${\n led.Pin\n },\"${`${led.SimVar.Identfier}:${led.SimVar.Index}`}\");`\n ).join('')} `;\n } else if (Leds.length === 1) {\n Init += `\\nLed led;`;\n Setup += `\\n led = new Led(${\n Leds[0].Pin\n },\"${`${Leds[0].SimVar.Identfier}:${Leds[0].SimVar.Index}`}\");`;\n }\n return BaseCode.replace('{INIT}', Init)\n .replace('{SETUP}', Setup)\n .replace('{CR}', CopyRight)\n .replace('{CUSTOMFUNCS}', CustomFuncs)\n .replace('{LOOP}', Loop)\n .replace('{INCLUDES}', Includes)\n .replace('{DESJSON}', DesJson);\n}\n" }, { "alpha_fraction": 0.6739726066589355, "alphanum_fraction": 0.6739726066589355, "avg_line_length": 16.380952835083008, "blob_id": "208c8bb38c5b325e24e3fe4464bc718ff57a1e24", "content_id": "6a705f6cf25be9b30d018d42206fa01c8a41b443", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 365, "license_type": "permissive", "max_line_length": 65, "num_lines": 21, "path": "/src/Arduino/ArduinoComponent.ts", "repo_name": "Learsim/LearsimControlPanel", "src_encoding": "UTF-8", "text": "import { SimVar } from '../API/Clients';\n\nexport enum ComponentType {\n EightSegDisplay,\n Display,\n Led,\n NeoPixelStrip,\n}\nexport default class ArduinoComponent {\n Pin: number;\n\n SimVar: SimVar;\n\n Type: ComponentType;\n\n constructor(pin: number, simVar: SimVar, type: ComponentType) {\n this.Pin = pin;\n this.SimVar = simVar;\n this.Type = type;\n }\n}\n" }, { "alpha_fraction": 0.7592592835426331, "alphanum_fraction": 0.7608024477958679, "avg_line_length": 39.5, "blob_id": "458f3721a116ebd5e09fd2e6411810b064e6e8ee", "content_id": "df208ee656627413fcdc0758f9914eb94f8044a3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 648, "license_type": "permissive", "max_line_length": 69, "num_lines": 16, "path": "/src/API/API.Common.ts", "repo_name": "Learsim/LearsimControlPanel", "src_encoding": "UTF-8", "text": "import axios, { AxiosRequestConfig } from 'axios';\n\nexport const statusEndpoint = 'api/status';\nexport const connectEndpoint = 'api/simconnect/connect';\nexport const clientsEndpoint = 'api/clients';\nexport const startSimEndpoint = 'api/startSim';\nexport const clientEndpoint = 'api/client';\nexport const valuesEndpoint = 'api/getValues';\nexport const enumEndpoint = 'api/getEnums';\nexport const nodesEndpoint = 'api/nodes';\nexport const AxiosAPIConfig: AxiosRequestConfig = { timeout: 2 };\nexport default function startSimulator(hostname: string) {\n return axios.get(hostname.concat(startSimEndpoint)).then((res) => {\n return res.data;\n });\n}\n" } ]
14
alvsgithub/erp5
https://github.com/alvsgithub/erp5
f254903679e7d16076671dce66759f6c74ff3566
22887cde63f9099258ebf910f3662aa5680d5cdf
50bba4746d7d34b116b729486f3ad876e0b16e51
refs/heads/master
2020-12-28T23:46:45.850199
2014-10-23T13:16:01
2014-10-23T13:37:39
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5213834047317505, "alphanum_fraction": 0.5288211107254028, "avg_line_length": 36.887325286865234, "blob_id": "ede1655389e8a74dd8d8d2e52600db77d0008902", "content_id": "57955ae8224339f310bd3c0e02d4ca46059a22ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2689, "license_type": "no_license", "max_line_length": 79, "num_lines": 71, "path": "/product/ERP5ShortMessage/PropertySheet/SMSGateway.py", "repo_name": "alvsgithub/erp5", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Copyright (c) 2010 Nexedi SA and Contributors. All Rights Reserved.\n# Francois-Xavier Algrain <[email protected]>\n#\n# WARNING: This program as such is intended to be used by professional\n# programmers who take the whole responsability of assessing all potential\n# consequences resulting from its eventual inadequacies and bugs\n# End users who are looking for a ready-to-use solution with commercial\n# garantees and support are strongly adviced to contract a Free Software\n# Service Company\n#\n# This program is Free Software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n#\n##############################################################################\n\nclass SMSGateway:\n \"\"\"\n Agent properties for Agent objects\n \"\"\"\n\n _properties = (\n { 'id' : 'gateway_user'\n , 'description': 'User name to connect '\n , 'type' : 'string'\n , 'mode' : 'w'\n },\n { 'id' : 'gateway_password'\n , 'description': 'Password to connect'\n , 'type' : 'string'\n , 'mode' : 'w'\n },\n { 'id' : 'gateway_account'\n , 'description': 'Account to use.'\n , 'type' : 'string'\n , 'mode' : 'w'\n },\n { 'id' : 'gateway_account_id'\n , 'description': 'Id of the accound. 
Can be used for push notification'\n , 'type' : 'string'\n , 'mode' : 'w'\n },\n { 'id' : 'default_sender'\n , 'description': 'Default sender when send message.'\n , 'type' : 'string'\n , 'mode' : 'w'\n },\n { 'id' : 'simulation_mode'\n , 'description': 'Force the simulation mode.'\n , 'type' : 'boolean'\n , 'mode' : 'w'\n },\n { 'id' : 'title_mode'\n , 'description': 'Allow or not to send by title'\n , 'type' : 'boolean'\n , 'mode' : 'w'\n },\n )" }, { "alpha_fraction": 0.5397371649742126, "alphanum_fraction": 0.5419273972511292, "avg_line_length": 39.45569610595703, "blob_id": "897dcd26b0b9de1fc925b470cddf8f59d8e13aca", "content_id": "910cb02bf2399ba4b17d46d43f73615d8d73b9b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3196, "license_type": "no_license", "max_line_length": 81, "num_lines": 79, "path": "/product/ERP5Form/GadgetField.py", "repo_name": "alvsgithub/erp5", "src_encoding": "UTF-8", "text": "from Products.Formulator.Field import ZMIField\nfrom Products.Formulator import Widget\nfrom Products.Formulator.DummyField import fields\nfrom Products.Formulator import Validator\nfrom zLOG import LOG\n\nclass GadgetWidget(Widget.TextWidget):\n \"\"\"\n A widget that displays a renderjs gadget\n \"\"\"\n property_names = Widget.TextWidget.property_names + \\\n ['gadget_html', 'gadget_cached', 'gadget_cache_id', 'gadget_property',\n 'gadget_connection', 'gadget_id']\n\n gadget_html = fields.StringField('gadget_html',\n title='Gadget Html',\n description=(\"The id of the html page containing the \\\n gadget\"),\n default='',\n required=0)\n\n gadget_id = fields.StringField('gadget_id',\n title='Gadget Id',\n description=(\"The id of the gadget\"),\n default='',\n required=0)\n\n gadget_cache_id = fields.StringField('gadget_cache_id',\n title='Gadget Cache Id',\n description=(\"The id of the cache in localstorage\"),\n default='',\n required=0)\n\n gadget_property = fields.StringField('gadget_property',\n title='Gadget Properties',\n description=(\"Json Data used to initialize the gadget\"),\n default='',\n required=0)\n\n gadget_connection = fields.StringField('gadget_connection',\n title='Gadget Connections',\n description=(\"Json Data used to define interactions\"),\n default='',\n required=0)\n\n gadget_cached = fields.CheckBoxField('gadget_cached',\n title='Gadget Cached',\n description=(\"The rendering of the gadget will be \\\n cached in localstorage.\"),\n default=0,\n required=0)\n\n def render(self, field, key, value, REQUEST, render_prefix=None):\n return self.render_view(field, value, REQUEST, render_prefix)\n\n def render_view(self, field, value, REQUEST=None, render_prefix=None):\n kw = {}\n gadget_mapping = {\"gadget_cached\": \"data-gadget-cacheable\",\n \"gadget_cache_id\": \"data-gadget-cache-id\",\n \"gadget_html\": \"data-gadget\",\n \"gadget_id\": \"id\",\n \"gadget_connection\": \"data-gadget-connection\",\n \"gadget_property\": \"data-gadget-property\"}\n for property_name in gadget_mapping.keys():\n property_value = field.get_value(property_name)\n if property_value or property_name==\"gadget_html\":\n kw[gadget_mapping[property_name]] = property_value\n return Widget.render_element(\"div\",\n **kw)\n\nGadgetWidgetInstance = GadgetWidget()\n\nclass GadgetField(ZMIField):\n \"\"\" Gadget field\n \"\"\"\n meta_type = \"GadgetField\"\n\n widget = GadgetWidgetInstance\n validator = Validator.SuppressValidatorInstance\n" } ]
2
Tkocz/pysteam
https://github.com/Tkocz/pysteam
8d1f1f9ff9a3c9932462cb2cd231d02af4c45079
df9d929ea4e8d43762add346fae3d557cfecff74
7b5c41ff35def855c804ebbb7fab3af61290ea85
refs/heads/master
2021-03-16T09:07:38.577565
2017-08-30T12:32:01
2017-08-30T12:32:01
76,860,176
1
2
null
null
null
null
null
[ { "alpha_fraction": 0.6268656849861145, "alphanum_fraction": 0.6398442387580872, "avg_line_length": 37.525001525878906, "blob_id": "95ed05f96c675ba28486addba09b35dccb78ef13", "content_id": "937c273390a7af5d18dd063c3b4ade01e6bc66b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3082, "license_type": "no_license", "max_line_length": 141, "num_lines": 80, "path": "/Experiment1enhncd.py", "repo_name": "Tkocz/pysteam", "src_encoding": "UTF-8", "text": "from pyspark.sql.types import *\nimport CollaborativeFiltering as CF\nimport ContentBasedFiltering as CBF\nimport pandas as pd\nfrom tqdm import *\nimport numpy as np\nfrom time import localtime, strftime\nimport CheckCSV\n\ncf = CF.CollaborativFiltering()\ncbf = CBF.ContentBasedFiltering()\ncsv = CheckCSV.CheckCSV()\nschema = StructType([\n StructField(\"steamid\", IntegerType()),\n StructField(\"appid\", IntegerType()),\n StructField(\"rating\", DoubleType())\n])\n\n# set envparam PYSPARK_PYTHON = python3\nFILE_SIZE = 100\nITER = 1\nNFOLDS = 10\nMIN_GAMES = 2\n\ndataset = cf.spark.read.csv('Resources/formateddataset{0}.csv.gz'.format(FILE_SIZE), header=True, schema=schema)\nprint(\"OnUsers: \", dataset.select('steamid').distinct().count())\nnGames = dataset[dataset.rating == 1.0].groupBy('steamid').count().filter('count>=' + str(MIN_GAMES))\ndataset = dataset.join(nGames, 'steamid').select('steamid', 'appid', 'rating')\ndataset.cache()\ncbf.readsimilaritymatrix(FILE_SIZE)\nprint(\"nUsers: \", dataset.select('steamid').distinct().count())\nprint(\"nApps: \", dataset.select('appid').distinct().count())\ncf.setOptParams()\n\"\"\"ParamOpt\"\"\"\n# (training, validation) = dataset.randomSplit([0.9, 0.1])\n# (train, test) = training.randomSplit([0.8, 0.2])\n# cf.paramOpt(validation, 2, 10)\n\nresult = pd.DataFrame()\nfolds = [(1.0 / NFOLDS)] * NFOLDS\n\nfor i in tqdm(range(ITER)):\n\n splits = dataset.randomSplit(folds)\n\n #TODO: fix stratefied fold with even distribution\n\n for fold, test in enumerate(tqdm(splits)):\n\n nUsers = test.select(test.steamid).where(test.rating == 1).distinct().count()\n sampledtest = cf.takeSamples(test)\n train = dataset.subtract(test)\n ones = train.toPandas()\n ones = ones.where(ones.rating == 1)\n cbf_pred = cbf.predict(ones)\n cf.fit(train)\n cf_df = cf.predict(test)\n pd_users = sampledtest.toPandas()\n del pd_users['rating']\n cf_pred = cf_df.toPandas()\n cf_pred[['steamid', 'appid']] = cf_pred[['steamid', 'appid']].astype(int)\n iterators = {'cbf': cbf_pred, 'cf': cf_pred}\n\n for type, data in iterators.items():\n\n subset = data.where((data.rating == 0.0) | (\n (data.steamid.isin(pd_users.steamid)) & (data.appid.isin(pd_users.appid)))).dropna()\n subset['rank'] = subset.groupby('steamid').cumcount() + 1\n targets = subset.merge(pd_users, how='inner', on=('steamid', 'appid'))\n targets[['steamid', 'appid']] = targets[['steamid', 'appid']].astype(int)\n targets.insert(0, 'iter', i + 1)\n targets.insert(1, 'fold', fold + 1)\n targets.insert(2, 'type', type)\n #targets = targets.merge(nGames, how='inner', on='steamid')\n result = result.append(targets)\n break\nresult = result.sort_values(by=['iter', 'fold', 'steamid', 'rating'], ascending=[True, True, True, False])\nprint(result)\nresult.to_csv('ExperimentData/E1-{0}-{1}-{2}-{3}-{4}.csv.gz'.format(FILE_SIZE, ITER, NFOLDS, MIN_GAMES, strftime(\"%Y%m%d%H%M\", localtime())),\n compression='gzip')\n" }, { "alpha_fraction": 0.515195369720459, "alphanum_fraction": 0.5976845026016235, "avg_line_length": 
42.25, "blob_id": "389e1fe797704eeff68be84c085c8b1eddb79879", "content_id": "5ebb6b047fc77b7fbac3dcbf498c367260818520", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 691, "license_type": "no_license", "max_line_length": 184, "num_lines": 16, "path": "/TestRank.py", "repo_name": "Tkocz/pysteam", "src_encoding": "UTF-8", "text": "from Ranking import *\nimport pandas as pd\n\nrank = Rank()\npredictions = pd.DataFrame([[1, 1, 1.0, 1.0], [1, 2, 0.0, 0.5], [1, 3, 0.0, 0.1], [2, 2, 1.0, 0.5],\n [2, 3, 0.0, 0.1], [3, 1, 0.0, 1.0], [3, 2, 0.0, 0.5], [3, 3, 1.0, 0.1]], columns=['steamid', 'appid', 'rating', 'prediction'])\nusers = predictions.where(predictions.rating == 1.0)\n\npredictions = predictions.append(pd.DataFrame([[2, 1, 1.0, 1.0]], columns=['steamid', 'appid', 'rating', 'prediction'])).sort_values(['steamid', 'prediction'], ascending=[True, False])\ndel users['rating']\ndel users['prediction']\nprint(predictions)\ndict = {'test': predictions}\nresult = rank.rank(dict, users, 0)\n\nprint(result)" }, { "alpha_fraction": 0.5561694502830505, "alphanum_fraction": 0.6694291234016418, "avg_line_length": 40.769229888916016, "blob_id": "51d1d43c028f1e9e612fe17ed6183e7c18eddc2e", "content_id": "b7fd6180c40b00978a946b3a3461cb2a5e01da13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1086, "license_type": "no_license", "max_line_length": 120, "num_lines": 26, "path": "/CollectSurveyData.py", "repo_name": "Tkocz/pysteam", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nfrom steamwebapi.api import IPlayerService\n\nplayerserviceinfo = IPlayerService()\n\ngames = ['72850', '252950', '730', '271590', '294100', '245620', '292030', '482730', '289070', '47890', '570', '268500',\n '359550', '346110', '8500', '102600', '379720', '251470', '70600', '620']\n\nstars = {\"Don't own this game on steam\": 0, \"Don't own this game\": 0, \"Disliked strongly\": 1, \"Disliked\": 2,\n \"Liked slightly\": 3, \"Liked\": 4, \"Liked strongly\": 5, \"Haven't played / No opinion\": 0}\n\nconvertfunc = lambda x: stars[x]\ndataset = pd.read_csv('Resources/Steam Game Ranking.csv', usecols=range(1, 22), index_col=[0], header=None,\n skiprows=[0])\ndataset.columns = games\ndataset.index.names = ['steamid']\ndataset = dataset.applymap(convertfunc)\ndataset = dataset.applymap(np.int64)\ndataset = dataset.replace(0, np.nan)\nsdf = dataset.to_sparse()\nprint(dataset)\nprint('nUsers:', len(dataset.index), 'Sparsity:', 1 - sdf.density, 'Density:', sdf.density)\nprint(dataset.describe())\n\ndataset.to_csv('Resources/userratings.csv')\n" }, { "alpha_fraction": 0.6678754687309265, "alphanum_fraction": 0.6832879781723022, "avg_line_length": 36.602272033691406, "blob_id": "daad1ccdcdc27e008d4dc80b00e347ea1915a18c", "content_id": "0ddba108166dc5a23a448077e552316b082cc740", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3309, "license_type": "no_license", "max_line_length": 129, "num_lines": 88, "path": "/Experiment1noncv.py", "repo_name": "Tkocz/pysteam", "src_encoding": "UTF-8", "text": "from pyspark.sql.functions import explode, udf\nfrom pyspark.sql.types import *\nimport CollaborativeFiltering as CF\nimport ContentBasedFiltering as CBF\nimport pandas as pd\nimport sklearn\nfrom tqdm import *\nimport numpy as np\nfrom time import localtime, strftime\nimport CheckCSV\nfrom Ranking import *\nfrom multiprocessing import Pool\nfrom 
pyspark.sql import functions as F\n\n#http://127.0.0.1:4040/jobs/\n\ncf = CF.CollaborativFiltering()\ncbf = CBF.ContentBasedFiltering()\ncsv = CheckCSV.CheckCSV()\nrank = Rank()\nschema = StructType([\n StructField(\"steamid\", IntegerType()),\n StructField(\"appid\", IntegerType()),\n StructField(\"rating\", DoubleType())\n])\n\n# set envparam PYSPARK_PYTHON = python3\nFILE_SIZE = 10000\nITER = 30\nMIN_GAMES = 2\nNUM_PARTITIONS = 10\nNUM_CORES = 8\n\n\ndataset = cf.spark.read.csv('Resources/formateddataset{0}.csv.gz'.format('PTime10000'), header=True, schema=schema)\nprint(\"OnUsers: \", dataset.select('steamid').distinct().count())\nnGames = dataset[dataset.rating == 1.0].groupBy('steamid').count().filter('count>=' + str(MIN_GAMES))\ndataset = dataset.join(nGames, 'steamid').select('steamid', 'appid', 'rating')\ndataset.cache()\ncbf.readsimilaritymatrix(FILE_SIZE)\nprint(\"nUsers: \", dataset.select('steamid').distinct().count())\nprint(\"nApps: \", dataset.select('appid').distinct().count())\n\n\ncf.setOptParams()\n\"\"\"ParamOpt\"\"\"\n#(dataset, validation) = dataset.randomSplit([0.9, 0.1])\n#cf.paramOpt(validation, 2, 10)\n\nresult = pd.DataFrame()\nfor i in tqdm(range(ITER), leave=True):\n nUsers = dataset.select(dataset.steamid).where(dataset.rating == 1).distinct().count()\n test = cf.takeSamples(dataset)\n #print(test.show())\n #print(dataset.where(\"steamid=1\" and \"rating=1.0\").show())\n train = dataset.subtract(test)\n ones = train.where(dataset.rating == 1)\n pdones = ones.toPandas()\n gb = pdones.groupby(by=['steamid'], as_index=False)\n dataframe = pd.DataFrame([i for i in gb])\n del dataframe[0]\n cbftest = dataframe.values.flatten()\n split = np.array_split(cbftest, NUM_PARTITIONS)\n cbf_pred = pd.DataFrame(columns=['steamid', 'appid', 'rating', 'prediction'])\n for r in tqdm(split):\n pool = Pool(NUM_CORES)\n cbf_pred = cbf_pred.append(pool.map(cbf.predict, r))\n pool.close()\n pool.join()\n cbf_pred[['steamid', 'appid']] = cbf_pred[['steamid', 'appid']].astype(int)\n cf_shit = dataset.subtract(ones)\n cf.fit(train)\n #userrecs = cf.model.recommendForAllUsers(dataset.select('appid').distinct().count())\n #cf_df = userrecs.withColumn(\"recommendations\", explode('recommendations')).selectExpr(\"steamid\", \"recommendations.*\")\n #cf_df.sort(['steamid', 'prediction'], ascending=[True, False])\n cf_df = cf.predict(cf_shit)\n pd_users = test.toPandas()\n del pd_users['rating']\n cf_pred = cf_df.toPandas()\n cf_pred[['steamid', 'appid']] = cf_pred[['steamid', 'appid']].astype(int)\n predictions = {'cbf': cbf_pred, 'cf': cf_pred}\n targets = rank.rank(predictions, pd_users, i)\n result = result.append(targets)\n\nresult = result.sort_values(by=['iter', 'steamid', 'rating'], ascending=[True, True, False])\nprint(result)\nresult.to_csv('ExperimentData/E1-{0}-{1}-{2}-{3}.csv.gz'.format(FILE_SIZE, ITER, MIN_GAMES, strftime(\"%Y%m%d%H%M\", localtime())),\n compression='gzip')\n" }, { "alpha_fraction": 0.5823556780815125, "alphanum_fraction": 0.610980749130249, "avg_line_length": 37.05356979370117, "blob_id": "93604313c571c24ff0e69f9640257e588eb407a8", "content_id": "53cece291aa1ac4fc866447fa8814cadb2f8b478", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4322, "license_type": "no_license", "max_line_length": 118, "num_lines": 112, "path": "/Experiment1.py", "repo_name": "Tkocz/pysteam", "src_encoding": "UTF-8", "text": "import datetime\n\nfrom pyspark.sql import Window\nfrom pyspark.sql.types import *\nimport 
CollaborativeFiltering as CF\nimport ContentBasedFiltering as CBF\nfrom pyspark.sql.functions import broadcast\nimport pandas as pd\nfrom tqdm import *\nimport numpy as np\n\ncf = CF.CollaborativFiltering()\ncbf = CBF.ContentBasedFiltering()\n\n# recommenders = {'cf': cf, 'cbf': cbf}\n\nschema = StructType([\n StructField(\"steamid\", IntegerType()),\n StructField(\"appid\", IntegerType()),\n StructField(\"rating\", DoubleType())\n])\n\n# set envparam PYSPARK_PYTHON = python3\n\nFILE_SIZE = 100\nITER = 2\nNFOLDS = 10\n\ndataset = cf.spark.read.csv('Resources/formateddataset{0}.csv.gz'.format(FILE_SIZE), header=True, schema=schema)\n\nappnames = cf.spark.read.csv('Resources/allgames.csv.gz', header=True, inferSchema=True)\ncbf.readsimilaritymatrix(FILE_SIZE)\ncf.setOptParams()\n# apps = dataset.toPandas()\n# cbf.generateGameGenreMatrix(apps, save=True, file_size=FILE_SIZE)\n# cbf.generateSimMatrix(cbf.gm, save=True, file_size=FILE_SIZE)\n\n\"\"\"ParamOpt\"\"\"\n# (training, validation) = dataset.randomSplit([0.9, 0.1])\n# (train, test) = training.randomSplit([0.8, 0.2])\n# cf.paramOpt(validation, 2, 10)\n\n# 10 fold\n# foreach fold do everithing below\nresult = pd.DataFrame()\nfolds = [(1.0 / NFOLDS)] * NFOLDS\nfor i in tqdm(range(ITER)):\n\n splits = dataset.randomSplit(folds)\n\n for fold, split in enumerate(tqdm(splits)):\n\n bSplit = broadcast(split)\n nUsers = bSplit.select(bSplit.steamid).where(bSplit.rating == 1).distinct().count()\n users = cf.takeSamples(bSplit, nUsers)\n busers = broadcast(users)\n # users.show()\n cbftest = bSplit.subtract(users)\n bcbftest = broadcast(cbftest)\n preds = cbf.predict(bcbftest.toPandas(), 0)\n cbf_df = cf.spark.createDataFrame(preds)\n bCbf_df = broadcast(cbf_df)\n train = dataset.subtract(bSplit)\n cf.fit(train)\n cf_df = cf.predict(bSplit)\n bCf_df = broadcast(cf_df)\n\n for user in busers.collect():\n\n prediction = -1.0\n cf_sel = bCf_df.where((bCf_df.steamid == user.steamid) & (bCf_df.appid == user.appid)) # .collect()[0]\n cf_count = bCf_df.where((bCf_df.steamid == cf_sel.first().steamid) & (bCf_df.rating != 1) & (\n bCf_df.prediction > cf_sel.first().prediction)).count()\n cbf_sel = bCbf_df.where((bCbf_df.steamid == user.steamid) & (bCbf_df.appid == user.appid)) # .collect()\n\n if cbf_sel.first() is not None:\n cbf_count = bCbf_df.where(\n (bCbf_df.steamid == cbf_sel.first().steamid) & (\n bCbf_df.prediction > cbf_sel.first().prediction)).count()\n prediction = cbf_sel.first().prediction\n else:\n cbf_count = bCbf_df.where(bCbf_df.steamid == user.steamid).count()\n result = result.append(\n pd.DataFrame([[int(i + 1), int(fold + 1), 'CF', int(user.steamid), int(user.appid), int(user.rating),\n float(cf_sel.first().prediction), int(cf_count + 1)]]))\n result = result.append(\n pd.DataFrame([[int(i + 1), int(fold + 1), 'CBF', int(user.steamid), int(user.appid), int(user.rating),\n float(prediction), int(cbf_count + 1)]]))\n result.columns = ['iter', 'fold', 'type', 'steamid', 'appid', 'rating', 'prediction', 'rank']\n print(result)\n break\nresult.columns = ['iter', 'fold', 'type', 'steamid', 'appid', 'rating', 'prediction', 'rank']\nprint(result)\nresult.to_csv('ExperimentData/E1-{0}-{1}-{2}r2.csv.gz'.format(FILE_SIZE, ITER, NFOLDS), compression='gzip')\n\n# user = dataset[dataset.steamid == 11]\n# cbf.predict(user, 20).join(appnames, on=['appid'], how='left').show()\n# cf.predict(user).join(appnames, on=['appid'], how='left').show()\n# Show predictions\n# cbf_df.join(appnames, on=['appid'], how='left').show()\n# cf_df.join(appnames, 
on=['appid'], how='left').show()\n\ncf.spark.stop()\n\n# E1-100-2-10 Time\n# iter- 1 100%|██████████| 10/10 [38:32<00:00, 240.82s/it]\n# iter- 2 100%|██████████| 10/10 [42:30<00:00, 254.83s/it]\n# total 100%|██████████| 2/2 [1:21:02<00:00, 2384.01s/it]\n# E1-250-2-10\n# iter- 1\n# iter- 2\n# total\n" }, { "alpha_fraction": 0.5703714489936829, "alphanum_fraction": 0.592280924320221, "avg_line_length": 36.46195602416992, "blob_id": "e790fef5f242ecca8afef491f2c3f44579995f05", "content_id": "b9f120bc2c6d522acbc7350299dd766dbddc19ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6892, "license_type": "no_license", "max_line_length": 136, "num_lines": 184, "path": "/ContentBasedFiltering.py", "repo_name": "Tkocz/pysteam", "src_encoding": "UTF-8", "text": "from __future__ import print_function\nimport itertools\nfrom scipy.spatial.distance import cosine\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import broadcast\nimport pandas as pd\nimport numpy as np\nimport steamfront\nimport requests\nfrom tqdm import *\n\nclass ContentBasedFiltering():\n \"\"\"Content-based Filtering based on content similarities with Top-N recommendations.\"\"\"\n\n __author__ = \"Jim Glansk, Martin Bergqvist\"\n\n def __init__(self):\n self.Model = None\n self.gm = None\n self.sm = None\n self.client = steamfront.Client()\n self.apps = pd.read_csv('Resources/Genres.csv.gz')\n\n def fit(self, X=None, nGames = None):\n \"\"\"Fit traning data to model.\"\"\"\n bX = broadcast(X)\n X = bX.toPandas()\n self.generateGameGenreMatrix(X, nGames)\n self.train()\n\n def train(self):\n \"\"\"Train model with traning data and generate a similarity matrix.\"\"\"\n\n sm = self.generateSimMatrix(self.gm)\n\n self.sm = sm\n\n def getApps(self, appid):\n \"\"\"Get app genres from api\"\"\"\n\n\n tags = self.apps[(self.apps.appid == appid)]\n currentGenres = tags['tag'].tolist()\n # currentGame = self.client.getApp(appid=appid)\n # currentGenres = (list(currentGame.genres))\n # currentGenres.extend(list(currentGame.categories))\n return currentGenres\n\n def generateGameGenreMatrix(self, appids=None, nGames=10, save=None, file_size=''):\n \"\"\"Generate game-genre matrix (app * genre)\"\"\"\n\n if appids is None:\n steamAppList = 'http://api.steampowered.com/ISteamApps/GetAppList/v2/'\n dictGames = requests.get(steamAppList)\n jsonGames = dictGames.json()\n gameList = [i['appid'] for i in jsonGames['applist']['apps']['app']]\n appids = pd.DataFram(gameList, columns=['appid'])\n\n appids = appids['appid'].unique()\n gm = pd.DataFrame()\n gm.index.names = [\"appid\"]\n for id in tqdm(appids):\n for genre in self.getApps(id):\n if genre is not None:\n gm.set_value(id, genre, int(1))\n #print('\\rGenerate gm:{0}%'.format(round(i / appids.size * 100)), end=\"\", flush=True)\n gm = gm.fillna(value=0)\n print('\\n')\n self.gm = gm\n if save is not None:\n gm.to_csv('Resources/gamematrix{0}.csv.gz'.format(file_size), compression='gzip', mode='w+')\n\n return (gm)\n\n def generateSimMatrix(self, dataset=None, save=None, file_size=''):\n \"\"\"Generate similarity matrix (app * app)\"\"\"\n\n if dataset is None:\n dataset = self.gm\n tdataset = dataset.T\n appids = tdataset.columns\n simMatrix = pd.DataFrame()\n simMatrix.index.names = [\"appid\"]\n pbar = tqdm(total=len(appids)**2)\n for id1, id2 in itertools.product(appids, appids):\n simMatrix.set_value(id1, id2, 1 - cosine(tdataset[id1], tdataset[id2]))\n pbar.update(1)\n pbar.close()\n self.sm = simMatrix\n if 
save:\n simMatrix.to_csv('Resources/simmatrix{0}.csv.gz'.format(file_size), compression='gzip', mode='w+')\n\n return (simMatrix)\n\n def predict(self, df, nRec=None):\n \"\"\"Predict similar games from user-owned games based on game genre tags\"\"\"\n\n ones = df[df.rating == 1.0]\n preds = pd.DataFrame()\n users = np.sort(ones.steamid.unique(), axis=0);\n for i in users:\n sm = self.sm.copy(deep=True)\n #focus user\n user = ones[(ones.steamid == i)]\n #drop NA-apps\n user = user[user.appid.isin(sm.index)]\n #drop user-owned games from axis 0\n result = sm.drop(user.appid, axis=0)\n #focus axis 1 on owned games\n result = result[user.appid]\n #create new column with max similarities from row\n result['prediction'] = result.max(axis=1)\n #sort all columns in decending order and take Top-N apps\n appids = result.sort_values(['prediction'], ascending=False)\n if nRec is not None:\n appids = appids.head(nRec)\n #arrange (steamid, appid, rating, predictions)\n newpred = appids.prediction\n newpred = newpred.reset_index()\n newpred.insert(0, 'steamid', i)\n newpred.insert(2, 'rating', 0.0)\n #append result\n preds = preds.append(newpred)\n #formate to spark df\n preds.sort_values(['steamid', 'prediction'], ascending=[True, False])\n return preds\n\n def readsimilaritymatrix(self, file_size):\n \"\"\"Read similarity and Game-genre matrix from csv file\"\"\"\n\n if self.sm is None:\n sm = pd.read_csv('Resources/simmatrix{0}.csv.gz'.format(file_size), compression='gzip', index_col=['appid'], delimiter=',')\n sm.columns = sm.columns.astype('Int64')\n self.sm = sm\n gm = pd.read_csv('Resources/gamematrix{0}.csv.gz'.format(file_size), compression='gzip', index_col=['appid'], delimiter=',')\n self.gm = gm\n else:\n return('model already created')\n\n def showMatrix(self):\n \"\"\"Show similarity and game-genre matrix if created\"\"\"\n\n if self.gm is not None:\n print('GameMatrix')\n with pd.option_context('display.max_rows', self.gm.shape[0], 'display.max_columns', self.gm.shape[1]):\n print(self.gm)\n if self.sm is not None:\n print('SimilarityMatrix')\n with pd.option_context('display.max_rows', self.sm.shape[0], 'display.max_columns', self.sm.shape[1]):\n print(self.sm)\n\n#test CBF\n\n#cbf = ContentBasedFiltering()\n#cbf.readsimilaritymatrix(10000)\n#apps = pd.read_csv('Resources/formateddataset10000.csv.gz', compression='gzip')\n#cbf.generateGameGenreMatrix(apps, save=True, file_size=10000)\n#cbf.gm = pd.read_csv('Resources/gamematrix{0}.csv.gz'.format(10000), compression='gzip', index_col=['appid'], delimiter=',')\n#cbf.generateSimMatrix(cbf.gm, save=True, file_size=10000)\n#\n# #227940\n#sm = pd.read_csv('Resources/formateddataset100.csv.gz', compression='gzip')\n#user = sm[((sm.steamid == 8) & (sm.rating == 1))]\n#print(user)\n# # print(sm.collect())\n# # print(sm[sm.steamid == 0])\n# # cbf.fit(sm)\n# # print(cbf.sm)\n# data = pd.DataFrame([(0, 437900, 1)], columns=['steamid', 'appid', 'rating'])\n# #prediction = cbf.predict(data)\n# #\n# #print(prediction[prediction.appid == 227940])\n# print(prediction)\n# # sm = sm.toPandas()\n# # matrix = cbf.generateGameGenreMatrix(sm['appid'])\n# # simmatrix = cbf.generateSimMatrix(matrix)\n# #\n# # print(matrix)\n# # print(simmatrix)\n\n#0 1 1 cf 0 42910 1.0 0.708491 4\n#1 1 1 cf 1 437900 1.0 0.000000 145\n#2 1 1 cf 2 7760 1.0 0.448446 37" }, { "alpha_fraction": 0.6976568698883057, "alphanum_fraction": 0.7203325629234314, "avg_line_length": 27.782608032226562, "blob_id": "7ff89ac3a35f027b0c0b56234c28e9ca89462443", "content_id": 
"fc89c07c693d25dd1f47b48ecfb4b0b1fc8019ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1323, "license_type": "no_license", "max_line_length": 83, "num_lines": 46, "path": "/Recommender.py", "repo_name": "Tkocz/pysteam", "src_encoding": "UTF-8", "text": "from pyspark import SparkContext\nfrom pyspark.sql import SparkSession\nfrom pyspark.mllib.recommendation import ALS, MatrixFactorizationModel, Rating\nimport numpy as np\nimport time\n\n\nss = spark = SparkSession\\\n .builder\\\n .appName(\"ALSExample\")\\\n .getOrCreate()\n\nsc = ss.sparkContext\nsc.setLogLevel('OFF')\n\n# $example on$d\n# Load and parse the data\n\ndf = spark.read.csv('Resources/formateddataset.csv', header=True, inferSchema=True)\nprint(df.select('steamid').show())\n\n\nratings = df.rdd\ntraining, test = ratings.randomSplit([0.8, 0.2])\n\n\n# Build the recommendation model using Alternating Least Squares\nrank = 10\nnumIterations = 10\nalpha = 40.0\nlamb = 0.01\n\nmodel = ALS.trainImplicit(ratings, rank, numIterations, lambda_=lamb, alpha=alpha)\n\ntestdata = ratings.map(lambda p: (p[0], p[1]))\npredictions = model.predictAll(testdata).map(lambda r: ((r[0], r[1]), r[2]))\njim = model.recommendProducts(2, 10)\nprint(jim)\nratesAndPreds = ratings.map(lambda r: ((r[0], r[1]), r[2])).join(predictions)\nMSE = ratesAndPreds.map(lambda r: (r[1][0] - r[1][1])**2).mean()\nprint(\"Mean Squared Error = \" + str(MSE))\n# Save and load model\n#model.save(sc, \"target/tmp/myCollaborativeFilter\")\n#sameModel = MatrixFactorizationModel.load(sc, \"target/tmp/myCollaborativeFilter\")\n# $example off$\nprint(ratesAndPreds.collect())" }, { "alpha_fraction": 0.6044110655784607, "alphanum_fraction": 0.6095729470252991, "avg_line_length": 34.53333282470703, "blob_id": "bf01fb74f6c7249efbb74a14b6d924a1f48b4b47", "content_id": "b4caccefcb161f8bc9893a7874eb17807de5e804", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2131, "license_type": "no_license", "max_line_length": 118, "num_lines": 60, "path": "/GenerateDataset.py", "repo_name": "Tkocz/pysteam", "src_encoding": "UTF-8", "text": "import json\nimport pandas as pd\nfrom steamwebapi.api import IPlayerService, ISteamUserStats\nfrom tqdm import *\n\nplayerserviceinfo = IPlayerService()\nsteamuserstats = ISteamUserStats()\n\nfeatures = [\n 'steamid',\n 'appid',\n 'playtime_forever'\n]\n\n\ndef achievementprocentage(ach):\n achieved = [i for i in ach if i['achieved'] == 1]\n return len(achieved) / len(ach)\n\nAMOUNT = 10000\n\niddict = dict()\n\njson_file = open('Resources/steamkey{0}.json'.format(AMOUNT), 'r')\njson_data = json.loads(json_file.read())\njson_file.close()\n\ndf = pd.DataFrame()\ndf.index.names = ['steamID/appID']\nid = 0\nfor steamid in tqdm(json_data):\n response = playerserviceinfo.get_owned_games(steamid)['response']\n if len(response) > 1:\n games = response['games']\n #iddict[id] = steamid\n for game in games:\n jointid = str(steamid) + \"/\" + str(game['appid'])\n df = df.append(pd.DataFrame([[jointid, int(id), int(game['appid']), int(game['playtime_forever'])]]))\n # df.set_value(jointid, 'playtime_forever', game['playtime_forever'])\n # df.set_value(jointid, 'steamid', id)\n # df.set_value(jointid, 'appid', game['appid'])\n id += 1\ndf.columns = ['steamID/appID', 'steamid', 'appid', 'playtime_forever']\ndf = df.sort_values(by=['steamid', 'appid'], ascending=[True, False])\ndf.to_csv('Resources/dataset{0}.csv.gz'.format(AMOUNT), mode=\"w+\", 
compression='gzip', columns=df.columns, index=None)\n\n # try:\n # currentGame = client.getApp(name=game['name'])\n # currentGenres = (list(currentGame.genres))\n # currentGenres.extend(list(currentGame.categories))\n # df.set_value(jointid, 'genres', currentGenres)\n # except:\n # continue\n\n # try:\n # achievements = steamuserstats.get_player_achievements(steamid, game['appid'])['playerstats'][\n # 'achievements']\n # df.set_value(jointid, 'achievements', achievementprocentage(achievements))\n # except:\n # df.set_value(jointid, 'achievements', None)" }, { "alpha_fraction": 0.79347825050354, "alphanum_fraction": 0.79347825050354, "avg_line_length": 45, "blob_id": "8c56586555cfb1c28c29529d1d4389cc3addf42e", "content_id": "9e94904f4164eabadaf0efd53dc895d43d4e479a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 92, "license_type": "no_license", "max_line_length": 81, "num_lines": 2, "path": "/README.md", "repo_name": "Tkocz/pysteam", "src_encoding": "UTF-8", "text": "# pysteam\nTools used for data extraction & analysis, developed as part of a bachelor thesis\n" }, { "alpha_fraction": 0.6269968152046204, "alphanum_fraction": 0.6663338541984558, "avg_line_length": 37.523075103759766, "blob_id": "8b9821b1e37cab4905636380f08b940649b54d94", "content_id": "28df957b6cba432ca6ca96dd19f940840f51f544", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5008, "license_type": "no_license", "max_line_length": 247, "num_lines": 130, "path": "/sparksession_mean.py", "repo_name": "Tkocz/pysteam", "src_encoding": "UTF-8", "text": "from __future__ import print_function\n\nimport sys\n\nif sys.version >= '3':\n long = int\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.ml.evaluation import RegressionEvaluator\nfrom pyspark.ml.evaluation import BinaryClassificationEvaluator\nfrom pyspark.ml.recommendation import ALS\nimport pandas as pd\nimport numpy as np\nimport itertools\n\ndef flipBit(df, nUser):\n ones = df[df.rating == 1.0].toPandas().values\n zeroes = df[df.rating == 0.0]\n id = np.array(np.unique(ones[:, 0]), dtype=int)\n index = np.random.choice(id, nUser, replace=False)\n ones[index, 2] = 0.0\n newpdf = pd.DataFrame(ones, columns=[\"user\", \"item\", \"rating\"])\n newpdf[[\"user\", \"item\"]] = newpdf[[\"user\", \"item\"]].astype(int)\n newdf = spark.createDataFrame(newpdf)\n newdf = newdf.union(zeroes)\n target = df.subtract(newdf)\n return newdf, target\n\nspark = SparkSession \\\n .builder \\\n .appName(\"pysteam\") \\\n .getOrCreate()\n\nspark.sparkContext.setLogLevel('OFF')\n\n# run1 : The best model was trained with rank = 12, lambda = 0.05, alpha = 10and numIter = 12, and its RMSE on the test set is 0.257741. mean-square error = 0.009006494757883858 mean absolute error = 0.06807511706369994 lmbda 0.01, 0.02, 0.05\n# run2 : The best model was trained with rank = 12, lambda = 0.15, alpha = 10and numIter = 12, and its RMSE on the test set is 0.259563. 
mean-square error = 0.008499430241066145 mean absolute error = 0.0668242950350116 lambdas = [0.05, 0.1, 0.15]\n\n# params\nranks = np.arange(8, 20, 2)\nlambdas = np.linspace(0.01, 0.5, 10.0)\nnumIters = np.arange(8, 20, 2)\nalpha = np.arange(8, 40, 2)\nbestModel = None\nbestValidationRmse = float(\"inf\")\nbestRank = 0\nbestLambda = -1.0\nbestNumIter = -1\nbestAlpha = 0\n\n# process data\ndataset = spark.read.csv('Resources/formateddataset1000.csv', header=True, inferSchema=True)\nprint(dataset.select(dataset.steamid).distinct().count())\nprint(dataset.select(dataset.appid).distinct().count())\n(training, validation) = dataset.randomSplit([0.9, 0.1])\n\nevaluator = RegressionEvaluator(metricName=\"rmse\", labelCol=\"rating\", predictionCol=\"prediction\")\nbevaluator = BinaryClassificationEvaluator(labelCol=\"rating\")\n\npm = [i for i in itertools.product(ranks, lambdas, numIters, alpha)]\nindexes = np.random.permutation(len(pm))\nindexes = [pm[i] for i in indexes[:1]]\ncount = 0\nfor rank, lmbda, numIter, alf in indexes:\n\n for i in range(1):\n (opttrain, optval) = validation.randomSplit([0.8, 0.2])\n model = ALS(implicitPrefs=True, rank=rank, regParam=lmbda, maxIter=numIter, alpha=alf, userCol=\"steamid\",\n itemCol=\"appid\", ratingCol=\"rating\").fit(opttrain)\n predictions = model.transform(optval)\n validationRmse = evaluator.evaluate(predictions)\n print(\"\\n\")\n print(\"RMSE (validation) = %f for the model trained with \" % validationRmse + \\\n \"rank = %d, lambda = %.2f, and numIter = %d. alpha = %d\" % (rank, lmbda, numIter, alf))\n\n if (validationRmse < bestValidationRmse):\n bestModel = model\n bestValidationRmse = validationRmse\n bestRank = rank\n bestLambda = lmbda\n bestNumIter = numIter\n bestAlpha = alf\n count += 1\n print(round((count / len(indexes)) * 100, 0), '%')\nprint(\"The best model was trained on evalData with rank = %d, lambda = %.2f, alpha = %d, \" % (bestRank, bestLambda, bestAlpha) \\\n + \"numIter = %d and RMSE %f.\" % (bestNumIter, bestValidationRmse))\n\n\nsetvalues = ['all', 'zeroes', 'ones']\n\npdf = pd.DataFrame()\n\n\nfor i in range(2):\n (train, test) = training.randomSplit([0.8, 0.2])\n model = ALS(implicitPrefs=True, rank=bestRank, regParam=bestLambda, maxIter=bestNumIter, alpha=bestAlpha, userCol=\"steamid\",\n itemCol=\"appid\", ratingCol=\"rating\").fit(train)\n predictions = model.transform(test)\n ones = predictions.where(\"rating=1\")\n zeroes = predictions.where(\"rating=0\")\n predictors = {'all': predictions, 'zeroes': zeroes, 'ones': ones}\n\n for s, p in predictors.items():\n pdf = pdf.append(pd.DataFrame([[i, s, evaluator.setParams(metricName=\"rmse\").evaluate(p), evaluator.setParams(metricName=\"mse\").evaluate(p), evaluator.setParams(metricName=\"mae\").evaluate(p)]]))\n count += 1\n print(round((i / 10) * 100, 0), '%')\npdf.columns = ['iteration', 'type', 'rmse', 'mse', 'mae']\nprint(pdf)\nprint(pdf.groupby(by=['type'], axis=0).mean())\n# brier score\n# AUC\n\n# setvalues = ['all', 'zeroes', 'ones']\n#\n# em = pd.DataFrame(columns=['rmse', 'mse', 'mae'])\n# em.index.names = [\"set values\"]\n#\n# ones = predictions.where(\"rating=1\")\n# zeroes = predictions.where(\"rating=0\")\n# predictors = {'all': predictions, 'zeroes': zeroes, 'ones': ones}\n#\n#\n# for s, p in predictors.items():\n# em.set_value(s, \"rmse\", evaluator.setParams(metricName=\"rmse\").evaluate(p))\n# em.set_value(s, \"mse\", evaluator.setParams(metricName=\"mse\").evaluate(p))\n# em.set_value(s, \"mae\", 
evaluator.setParams(metricName=\"mae\").evaluate(p))\n#\n# print(em)\n\nspark.stop()\n" }, { "alpha_fraction": 0.5569244027137756, "alphanum_fraction": 0.5699872374534607, "avg_line_length": 41.6063346862793, "blob_id": "cab755acc4e4b9aee87cffcf27d5b6d70e8325cb", "content_id": "86bdcfff784a6f170f879857bab6d858b6b4f796", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9416, "license_type": "no_license", "max_line_length": 140, "num_lines": 221, "path": "/CollaborativeFiltering.py", "repo_name": "Tkocz/pysteam", "src_encoding": "UTF-8", "text": "from __future__ import print_function\nimport sys\nif sys.version >= '3':\n long = int\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import broadcast\nfrom pyspark.ml.evaluation import RegressionEvaluator\nfrom pyspark.ml.evaluation import BinaryClassificationEvaluator\nfrom pyspark.ml.recommendation import ALS\nfrom pyspark.ml.tuning import CrossValidator, ParamGridBuilder\nimport pandas as pd\nimport numpy as np\nimport itertools\n\n#http://127.0.0.1:4041/jobs/\n\nclass CollaborativFiltering():\n \"\"\"Content-based Filtering based on content similarities with Top-N recommendations.\"\"\"\n\n __author__ = \"Jim Glansk, Martin Bergqvist\"\n\n def __init__(self):\n self.spark = SparkSession \\\n .builder \\\n .appName(\"pysteam\") \\\n .master(\"local[*]\") \\\n .config(\"spark.driver.memory\", \"16g\") \\\n .getOrCreate()\n\n self.als = ALS(implicitPrefs=True,\n userCol=\"steamid\",\n itemCol=\"appid\", ratingCol=\"rating\")\n self.spark.sparkContext.setLogLevel('OFF')\n self.bestModel = None\n self.bestValidationRmse = None\n self.bestRank = None\n self.bestLambda = None\n self.bestNumIter = None\n self.bestAlpha = None\n self.model = None\n\n def fit(self, X, rank=None, nIter=None, lmbda=None, alpha=None):\n \"\"\"Fit traning data to model.\"\"\"\n\n rank = 12 if rank is None else self.rank\n nIter = 10 if nIter is None else self.bestNumIter\n lmbda = 0.01 if lmbda is None else self.bestLambda\n alpha = 40.0 if alpha is None else self.bestAlpha\n self.train(X, rank, nIter, lmbda, alpha)\n\n def train(self, X, rank, nIter, lmbda, alpha):\n \"\"\"Train model with traning data and generate a similarity matrix.\"\"\"\n\n self.model = ALS(implicitPrefs=True,\n rank=rank,\n maxIter=nIter,\n regParam=lmbda,\n alpha=alpha,\n userCol=\"steamid\",\n itemCol=\"appid\",\n ratingCol=\"rating\").fit(X)\n return self.model\n\n def predict(self, users):\n \"\"\"Predict similar games from user-owned games based on game genre tags\"\"\"\n predictions = self.model.transform(users)\n predictions = predictions.sort(['steamid', 'prediction'], ascending=[True, False])\n return predictions\n\n def evalModel(self, X, numTrain):\n \"\"\"Evaluate model from training\"\"\"\n\n pdf = pd.DataFrame()\n evaluator = RegressionEvaluator(metricName=\"rmse\", labelCol=\"rating\", predictionCol=\"prediction\")\n count = 0\n for i in range(numTrain):\n (train, test) = X.randomSplit([0.8, 0.2])\n model = ALS(implicitPrefs=True,\n rank=self.bestRank,\n maxIter=self.bestNumIter,\n regParam=self.bestLambda,\n alpha=self.bestAlpha,\n userCol=\"steamid\",\n itemCol=\"appid\", ratingCol=\"rating\").fit(train)\n predictions = model.transform(test)\n ones = predictions.where(\"rating=1\")\n zeroes = predictions.where(\"rating=0\")\n predictors = {'all': predictions, 'zeroes': zeroes, 'ones': ones}\n\n for s, p in predictors.items():\n pdf = pdf.append(pd.DataFrame([[i, s,\n 
evaluator.setParams(metricName=\"rmse\").evaluate(p),\n evaluator.setParams(metricName=\"mse\").evaluate(p),\n evaluator.setParams(metricName=\"mae\").evaluate(p)]]))\n count += 1\n print(round((i / 10) * 100, 0), '%')\n pdf.columns = ['iteration', 'type', 'rmse', 'mse', 'mae']\n print(pdf)\n print(pdf.groupby(by=['type'], axis=0).mean())\n\n def crossValidator(self, X, test):\n\n paramMapExplicit = ParamGridBuilder() \\\n .addGrid(self.als.rank, [8, 12]) \\\n .addGrid(self.als.maxIter, [8, 12]) \\\n .addGrid(self.als.regParam, [0.01, 0.1]) \\\n .addGrid(self.als.alpha, [10, 40]) \\\n .build()\n\n crossval = CrossValidator(estimator=self.als,\n estimatorParamMaps=paramMapExplicit,\n evaluator=RegressionEvaluator(metricName=\"rmse\", labelCol=\"rating\", predictionCol=\"prediction\"),\n numFolds=1\n )\n cvModel = crossval.fit(X)\n print(cvModel.bestModel.rank)\n print(cvModel.bestModel.maxIter())\n print(cvModel.bestModel.regParam)\n print(cvModel.bestModel.alpha)\n\n cvModel.bestModel.transform(test).collect()\n\n\n def paramOpt(self, X, numVal, numParam):\n \"\"\"Optimize parameters to find best model\"\"\"\n\n bestParams = pd.DataFrame()\n\n ranks = np.arange(8, 20, 2)\n lambdas = np.linspace(0.01, 0.5, 10)\n numIters = np.arange(8, 20, 2)\n alpha = np.arange(8, 40, 2)\n bestValidationRmse = float(\"inf\")\n bestRank = 0\n bestLambda = -1.0\n bestNumIter = -1\n bestAlpha = 0\n evaluator = RegressionEvaluator(metricName=\"rmse\",\n labelCol=\"rating\",\n predictionCol=\"prediction\")\n pm = [i for i in itertools.product(ranks, lambdas, numIters, alpha)]\n indexes = np.random.permutation(len(pm))\n indexes = [pm[i] for i in indexes[:numParam]]\n count = 0\n for rank, lmbda, numIter, alf in indexes:\n for i in range(numVal):\n (opttrain, optval) = X.randomSplit([0.8, 0.2])\n model = ALS(implicitPrefs=True, rank=rank, regParam=lmbda, maxIter=numIter, alpha=alf,\n userCol=\"steamid\",\n itemCol=\"appid\", ratingCol=\"rating\").fit(opttrain)\n predictions = model.transform(optval)\n validationRmse = evaluator.evaluate(predictions)\n print(\"\\n\")\n print(validationRmse)\n print(\"RMSE (validation) = %f for the model trained with \" % validationRmse + \\\n \"rank = %d, lambda = %.2f, and numIter = %d. 
alpha = %d\" % (rank, lmbda, numIter, alf))\n\n if (validationRmse < bestValidationRmse):\n bestModel = model\n bestValidationRmse = validationRmse\n bestRank = rank\n bestLambda = lmbda\n bestNumIter = numIter\n bestAlpha = alf\n count += 1\n print(round((count / len(indexes)) * 100, 0), '%')\n print(\"The best model was trained on evalData with rank = %d, lambda = %.2f, alpha = %d, \" % (\n bestRank, bestLambda, bestAlpha) \\\n + \"numIter = %d and RMSE %f.\" % (bestNumIter, bestValidationRmse))\n self.bestRank, self.bestNumIter, self.bestLambda, self.bestAlpha = bestRank, bestNumIter, bestLambda, bestAlpha\n bestParams = bestParams.append(pd.DataFrame([[numVal, numParam, bestRank, bestNumIter, bestLambda, bestAlpha, bestValidationRmse]]))\n bestParams.columns = ['nValidationIter', 'nValidationParams', 'bestRank', 'bestnIter', 'bestLambda', 'bestAlpha', 'bestRmse']\n bestParams.to_csv('Resources/params.csv.gz', compression='gzip', mode='a', header=None)\n self.bestModel = bestModel\n\n return bestModel\n\n def setOptParams(self):\n params = pd.read_csv('Resources/params.csv')\n\n self.bestRank = params.bestRank\n self.bestNumIter = params.bestnIter\n self.bestLambda = params.bestLambda\n self.bestAlpha = params.bestAlpha\n\n def flipBit(self, df, nUsers):\n ones = df[df.rating == 1.0].toPandas().values\n zeroes = df[df.rating == 0.0]\n indexes = np.array(np.unique(ones[:, 0], return_index=True)[1], dtype=int)\n r_indexes = np.random.choice(indexes, nUsers, replace=False)\n ones[r_indexes, 2] = 0.0\n newpdf = pd.DataFrame(ones, columns=[\"steamid\", \"appid\", \"rating\"])\n newpdf[[\"steamid\", \"appid\"]] = newpdf[[\"steamid\", \"appid\"]].astype(int)\n newdf = self.spark.createDataFrame(newpdf)\n newdf = newdf.union(zeroes)\n target = df.subtract(newdf)\n return newdf, target\n\n def takeSamples(self, df):\n\n ones = df[df.rating == 1.0].toPandas()\n fn = lambda obj: obj.loc[np.random.choice(obj.index, 1, False), :]\n result = ones.groupby(by=['steamid'], as_index=False).apply(fn)\n target = self.spark.createDataFrame(result)\n target = target.sort(['steamid'], ascending=True)\n return target\n\n#test CF\n#CF = CollaborativFiltering()\n#dataset = CF.spark.read.csv('Resources/formateddataset1000.csv.gz', header=True, inferSchema=True)\n#(training, validation) = dataset.randomSplit([0.9, 0.1])\n#(opttrain, oprtest) = validation.randomSplit([0.8, 0.2])\n#CF.paramOpt(validation, 1, 1)\n#CF.crossValidator(opttrain, oprtest)\n#CF.evalModel(training, 1)\n#(train, test) = training.randomSplit([0.8, 0.2])\n#samples = CF.takeSamples(test, 10)\n#print(samples.collect())\n#CF.fit(train)\n#predictions = CF.predict(test)\n#print(predictions.collect())\n" }, { "alpha_fraction": 0.6025640964508057, "alphanum_fraction": 0.6274510025978088, "avg_line_length": 25.019607543945312, "blob_id": "0283d6986abb121b2c18ef6485ca5ad66bd230f3", "content_id": "52ca5d139ade1d4e65dc5fe661e48ae6e9fe0206", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1326, "license_type": "no_license", "max_line_length": 158, "num_lines": 51, "path": "/Heuristic.py", "repo_name": "Tkocz/pysteam", "src_encoding": "UTF-8", "text": "from __future__ import print_function\nimport pandas as pd\nimport numpy as np\nfrom tqdm import *\n\n\ndef evaluatetime(time):\n\n time = time / 60\n # if time == 0:\n # return 0\n if time >= 0 and time <= 1:\n return 0\n else:\n return 1\n # if time > 1 and time <= 5:\n # return 2\n # if time > 5 and time <= 20:\n # return 3\n # if time > 20 and 
time < 50:\n # return 4\n # else:\n # return 5\n\nAMOUNT = 1000\n\ndataset = pd.read_csv('Resources/dataset{0}.csv.gz'.format(AMOUNT), compression='gzip')\n\ngames = np.unique(dataset['appid'])\nsteamlist = []\nmatrix = pd.DataFrame(columns=games)\nmatrix.index.names = [\"steamid\"]\n\nfor row in tqdm(dataset.values):\n matrix.set_value(int(row[1]), int(row[2]), 1)\n\nprint('Wait..')\nmatrix = matrix.fillna(value=0)\nsdf = matrix.to_sparse(fill_value=0)\nprint('nUsers:', len(dataset.index), 'Sparsity:', 1 - sdf.density, 'Density:', sdf.density)\n\ntestlist = list(zip(matrix.index, matrix.columns))\n\nsteamlist = list()\n\nfor i in tqdm(matrix.index):\n for j in matrix.columns:\n steamlist.append((i, j, matrix.ix[i, j]))\n\nmatrix = pd.DataFrame().from_records(steamlist)\nmatrix.to_csv('Resources/formateddataset{0}.csv.gz'.format(AMOUNT), mode='w+', compression='gzip', header=[\"steamid\", \"appid\", \"rating\"], index=None, sep=',')" }, { "alpha_fraction": 0.5516778230667114, "alphanum_fraction": 0.5597315430641174, "avg_line_length": 40.38888931274414, "blob_id": "eb469040514ac88097fb62253c100637f03159e6", "content_id": "2397b8c1b584af7363936f8e84850968354010b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 746, "license_type": "no_license", "max_line_length": 94, "num_lines": 18, "path": "/Ranking.py", "repo_name": "Tkocz/pysteam", "src_encoding": "UTF-8", "text": "import pandas as pd\n\nclass Rank:\n def rank(self, predictions, users, i):\n results = pd.DataFrame()\n\n for type, data in predictions.items():\n # gör ett test av kod snuten\n subset = data.where((data.rating == 0.0) | (\n (data.steamid.isin(users.steamid)) & (data.appid.isin(users.appid)))).dropna()\n subset['rank'] = subset.groupby('steamid').cumcount() + 1\n targets = subset.merge(users, how='inner', on=('steamid', 'appid'))\n targets[['steamid', 'appid']] = targets[['steamid', 'appid']].astype(int)\n targets.insert(0, 'iter', i + 1)\n targets.insert(1, 'type', type)\n results = results.append(targets)\n\n return results\n" }, { "alpha_fraction": 0.6536274552345276, "alphanum_fraction": 0.6954742074012756, "avg_line_length": 35.94117736816406, "blob_id": "4f0f9976dd0aba86213de8b226a41133345682eb", "content_id": "b45d4c53466b1e9225fca93483218a239722bf29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4397, "license_type": "no_license", "max_line_length": 247, "num_lines": 119, "path": "/sparksession.py", "repo_name": "Tkocz/pysteam", "src_encoding": "UTF-8", "text": "from __future__ import print_function\n\nimport sys\n\nif sys.version >= '3':\n long = int\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.ml.evaluation import RegressionEvaluator\nfrom pyspark.ml.evaluation import BinaryClassificationEvaluator\nfrom pyspark.ml.recommendation import ALS\nimport pandas as pd\nimport numpy as np\nimport itertools\n\ndef flipBit(df, nUser):\n ones = df[df.rating == 1.0].toPandas().values\n zeroes = df[df.rating == 0.0]\n id = np.array(np.unique(ones[:, 0]), dtype=int)\n index = np.random.choice(id, nUser, replace=False)\n ones[index, 2] = 0.0\n newpdf = pd.DataFrame(ones, columns=[\"steamid\", \"appid\", \"rating\"])\n newpdf[[\"steamid\", \"appid\"]] = newpdf[[\"steamid\", \"appid\"]].astype(int)\n newdf = spark.createDataFrame(newpdf)\n newdf = newdf.union(zeroes)\n target = df.subtract(newdf)\n return newdf, target\n\nspark = SparkSession \\\n .builder \\\n .appName(\"pysteam\") \\\n 
.getOrCreate()\n\nspark.sparkContext.setLogLevel('OFF')\n\n# run1 : The best model was trained with rank = 12, lambda = 0.05, alpha = 10and numIter = 12, and its RMSE on the test set is 0.257741. mean-square error = 0.009006494757883858 mean absolute error = 0.06807511706369994 lmbda 0.01, 0.02, 0.05\n# run2 : The best model was trained with rank = 12, lambda = 0.15, alpha = 10and numIter = 12, and its RMSE on the test set is 0.259563. mean-square error = 0.008499430241066145 mean absolute error = 0.0668242950350116 lambdas = [0.05, 0.1, 0.15]\n\n# params\nranks = np.arange(8, 20, 2)\nlambdas = np.linspace(0.01, 0.5, 10)\nnumIters = np.arange(8, 20, 2)\nalpha = np.arange(8, 40, 2)\nbestModel = None\nbestValidationRmse = float(\"inf\")\nbestRank = 0\nbestLambda = -1.0\nbestNumIter = -1\nbestAlpha = 0\n\n# process data\ndataset = spark.read.csv('Resources/formateddataset1000.csv', header=True, inferSchema=True)\n\n(training, validation, test) = dataset.randomSplit([0.6, 0.2, 0.2])\ntraining, target = flipBit(training, 100)\ntarget.show()\nevaluator = RegressionEvaluator(metricName=\"rmse\", labelCol=\"rating\", predictionCol=\"prediction\")\nbevaluator = BinaryClassificationEvaluator(labelCol=\"rating\")\n\npm = [i for i in itertools.product(ranks, lambdas, numIters, alpha)]\nindexes = np.random.permutation(len(pm))\nindexes = [pm[i] for i in indexes[:10]]\ncount = 0\nfor rank, lmbda, numIter, alf in indexes:\n\n model = ALS(implicitPrefs=True, rank=rank, regParam=lmbda, maxIter=numIter, alpha=alf, userCol=\"steamid\",\n itemCol=\"appid\", ratingCol=\"rating\").fit(training)\n predictions = model.transform(validation)\n validationRmse = evaluator.evaluate(predictions)\n print(\"\\n\")\n print(\"RMSE (validation) = %f for the model trained with \" % validationRmse + \\\n \"rank = %d, lambda = %.2f, and numIter = %d. 
alpha = %d\" % (rank, lmbda, numIter, alf))\n\n if (validationRmse < bestValidationRmse):\n bestModel = model\n bestValidationRmse = validationRmse\n bestRank = rank\n bestLambda = lmbda\n bestNumIter = numIter\n bestAlpha = alf\n\n count += 1\n #print('\\r{0}%'.format(round((count / len(indexes)) * 100, 0)), end=\"\", flush=True)\n\nprint(\"The best model was trained on evalData with rank = %d, lambda = %.2f, alpha = %d, \" % (bestRank, bestLambda, bestAlpha) \\\n + \"numIter = %d and RMSE %f.\" % (bestNumIter, bestValidationRmse))\n\n\n# brier score\n# AUC\n\ntargetPrediction = bestModel.transform(target)\nprint('target prediction', targetPrediction.collect())\nprint('target RMSE:', evaluator.setParams(metricName=\"rmse\").evaluate(targetPrediction))\nprint('target MSE:', evaluator.setParams(metricName=\"mse\").evaluate(targetPrediction))\nprint('target MAE:', evaluator.setParams(metricName=\"mae\").evaluate(targetPrediction))\n\npredictions = bestModel.transform(test)\n\nsetvalues = ['all', 'zeroes', 'ones']\n\nem = pd.DataFrame(columns=['rmse', 'mse', 'mae'])\nem.index.names = [\"set values\"]\n\nones = predictions.where(\"rating=1\")\nzeroes = predictions.where(\"rating=0\")\npredictors = {'all': predictions, 'zeroes': zeroes, 'ones': ones}\n\n#fpr, tpr, thresholds = roc_curve(predictions, pred, pos_label=2)\n#auc(fpr, tpr)\n\nfor s, p in predictors.items():\n em.set_value(s, \"rmse\", evaluator.setParams(metricName=\"rmse\").evaluate(p))\n em.set_value(s, \"mse\", evaluator.setParams(metricName=\"mse\").evaluate(p))\n em.set_value(s, \"mae\", evaluator.setParams(metricName=\"mae\").evaluate(p))\n\nprint(em)\n\nspark.stop()\n\n" }, { "alpha_fraction": 0.6048728823661804, "alphanum_fraction": 0.6247351765632629, "avg_line_length": 45.06097412109375, "blob_id": "b5c7a3c907a4e445ffd0b86746b8625f18a107af", "content_id": "4a6a8501fb7b06446ce8bc1e55087d5226b0b01d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3792, "license_type": "no_license", "max_line_length": 122, "num_lines": 82, "path": "/Statistics.py", "repo_name": "Tkocz/pysteam", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport CheckCSV as CC\n\nplt.style.use('ggplot')\n\n\nclass Statistics:\n\n def evaluateExperiment(self, path):\n \"\"\"Evaluate binary experiment data\"\"\"\n csv = CC.CheckCSV()\n ngames = csv.get_n_owned_games(10000)\n\n data = pd.read_csv(path, compression='gzip',\n usecols=['iter', 'type', 'steamid', 'appid', 'rating', 'prediction', 'rank'])\n data = pd.merge(data, ngames, on='steamid')\n with pd.option_context('display.max_rows', data.shape[0], 'display.max_columns', data.shape[1]):\n print(data.groupby(by=['type'], axis=0).mean()['rank'])\n #print(data.groupby(by=['type', 'steamid', 'appid'], axis=0).mean()['rank'])\n #print(data.groupby(by=['type', 'steamid'], axis=0).mean()['rank'])\n\n CBF = data[data.type == 'cbf']\n CF = data[data.type == 'cf']\n\n if(np.array_equal(CBF[['steamid', 'appid']], CF[['steamid', 'appid']]) and CBF.values.shape == CF.values.shape):\n print('PASS - Equal')\n else:\n print('FAIL - Not Equal')\n\n axe = data[(data.nGames <= 1000)].groupby(by=['type', 'nGames'], axis=0).mean()['rank'].reset_index()\n g = sns.lmplot(y='rank', x='nGames', data=axe, hue='type', fit_reg=False)\n axe1 = data[(data.nGames <= 1000)].groupby(by=['type', 'appid'], axis=0).mean()['rank'].reset_index()\n g1 = sns.lmplot(y='rank', x='appid', data=axe1, 
hue='type', fit_reg=False)\n cfaxe = data[(data.type == 'cf') & (data.nGames <= 100)]\n cbfaxe = data[(data.type == 'cbf') & (data.nGames <= 100)]\n sns.set()\n\n cfg = sns.jointplot(x=\"nGames\", y=\"rank\", data=cfaxe, kind='kde', color=\"b\")\n cfg.set_axis_labels(\"Number of Games\", \"Rank\")\n cfg.fig.suptitle('CF')\n cbfg = sns.jointplot(x=\"nGames\", y=\"rank\", data=cbfaxe, kind='kde', color=\"r\")\n cbfg.set_axis_labels(\"Number of Games\", \"Rank\")\n cbfg.fig.suptitle('CBF')\n plt.show()\n\n def evaluateUser(self, path, minGames=0, maxGames=float('inf')):\n \"\"\"Evaluate distribution of games and users\"\"\"\n\n data = pd.read_csv(path, compression='gzip')\n apps = data[(data.rating == 1.0)].groupby(by=['steamid']).rating.count().reset_index()\n apps = apps[((apps.rating <= maxGames) & (apps.rating >= minGames))]\n datafilt = data.where((data.steamid.isin(apps.steamid)) & (data.rating == 1.0)).dropna()\n nGames = datafilt.appid.nunique()\n nUsers = apps.steamid.nunique()\n if maxGames == float('inf'):\n maxGames = apps.rating.max()\n apps.rating.hist(bins=maxGames - minGames)\n plt.title(\"Game Distribution Histogram\")\n plt.xlabel(\"Games\")\n plt.ylabel(\"Users\")\n plt.figtext(.82, .02, \"nGames: {0}\".format(nGames))\n plt.figtext(.02, .02, \"nUsers: {0}\".format(nUsers))\n plt.show()\n\n\nstat = Statistics()\n\n#stat.evaluateUser('Resources/formateddataset10000.csv.gz', minGames=0, maxGames=1000)\n\nstat.evaluateExperiment('ExperimentData/E1-10000-30-2-201708172154.csv.gz')\n\n#Titta på variationen mellan spel spelar äger (usertags) / entropy / consinesimilarity och antal spel\n#Kolla om det skilelr mellan usertags och latenta predictioner genom att mätta medelvärdet för ranken för grupperade spel.\n#Hur vanligt förekommande är spel\n#variance impoprtence med ranfomforest med variabler som variance, genere, frekvence med.\n# Vad kan vi se utifrån steam tjänst användarens preferencer.\n# speltid som heaurustic.\n# I vilket lägge ska jag använda repektive algorith beroendes av en unik spelare.\n# Finns det nått intressant med att titta på att en spealr faktiskt köper ett spel och sedan spelar det." 
}, { "alpha_fraction": 0.5885604619979858, "alphanum_fraction": 0.5945686101913452, "avg_line_length": 37.51852035522461, "blob_id": "97024a14d796825f8d9da1453ef1fd513da5ecbd", "content_id": "4922a57335d697fe4eb28db88efaf476464401ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4161, "license_type": "no_license", "max_line_length": 132, "num_lines": 108, "path": "/CheckCSV.py", "repo_name": "Tkocz/pysteam", "src_encoding": "UTF-8", "text": "import requests\nimport pandas as pd\nimport numpy as np\nfrom pandas.io.json import json_normalize\nfrom time import sleep\nfrom tqdm import *\n\n\nclass CheckCSV:\n\n def removeLegacy(self, path=None):\n \"\"\"Remove obsolete games from choosen dataset\"\"\"\n\n df = pd.read_csv(path, compression='gzip')\n print(df.shape)\n gamelist = pd.read_csv('Resources/Genres.csv.gz', usecols=['appid'])\n gamelist = pd.DataFrame(gamelist.appid.unique(), columns=['appid'])\n print(gamelist)\n filter_df = pd.merge(df, gamelist, on='appid', how='inner')\n filter_df = filter_df.dropna()\n filter_df = filter_df.sort_values(['steamid', 'appid'], ascending=[True, True])\n print('done')\n print(filter_df.shape)\n print(filter_df)\n print(np.setdiff1d(df['appid'].unique(), filter_df['appid'].unique()))\n filter_df.to_csv(path, compression='gzip', columns=['steamid', 'appid', 'rating'], index=None)\n\n @staticmethod\n def remove_min_games(df, minGames=0):\n if(minGames > 0):\n data = df.copy()\n users = data[(data.rating == 1.0)].groupby(by=['steamid']).rating.count().reset_index()\n users = users[(users.rating >= minGames)]\n datafilt = data.where((data.steamid.isin(users.steamid))).dropna()\n #print(df.steamid.nunique(), df.appid.nunique())\n #print(datafilt.steamid.nunique(), datafilt.appid.nunique())\n #with pd.option_context('display.max_rows', df.shape[0], 'display.max_columns', 6):\n #print(pd.concat([df, datafilt], axis=1))\n datafilt[['steamid', 'appid']] = datafilt[['steamid', 'appid']].astype(int)\n return datafilt\n else:\n return df\n\n @staticmethod\n def get_n_owned_games(file_size):\n \"\"\"Get number of games accosiated with steam id\"\"\"\n df = pd.read_csv('Resources/formateddataset{0}.csv.gz'.format(file_size), compression='gzip', usecols=['steamid', 'rating'])\n nGames = df[(df.rating == 1.0)].groupby(by=['steamid']).rating.count().reset_index()\n nGames.columns = ['steamid', 'nGames']\n return(nGames)\n\n\n def checkapp(self, app):\n \"\"\"Check if game is applies for Content-based filtering\"\"\"\n\n data = requests.get('http://store.steampowered.com/api/appdetails?appids={0}&format=json'.format(app)).json()\n\n if data[str(app)][\"success\"]:\n type = data[str(app)][\"data\"]['type']\n if (type != 'game'):\n return False\n return data[str(app)][\"success\"]\n\n def getAllValidGames(self):\n \"\"\"Check all games not sutied for Content-based filtering\"\"\"\n\n gamelist = pd.read_csv('Resources/allgames.csv.gz', compression='gzip')\n games = []\n appids = gamelist['appid'].unique()\n i = 0\n pbar = tqdm(total=appids.size)\n pbar.set_description('Processing ')\n appsize = appids.shape[0] - 1\n while (i <= appsize):\n try:\n if self.checkapp(appids[i]):\n games.append(appids[i])\n i += 1\n pbar.update(1)\n except:\n pbar.set_description('{(-_-)}Zzz..')\n sleep(5)\n pbar.set_description('Processing ')\n continue\n pbar.close()\n validgames = pd.DataFrame(games)\n validgames.to_csv('Resources/validgames.csv')\n\n\n def check_size(self, path):\n\n dataset = pd.read_csv(path)\n 
dataset['steamid'].unique().size()\n\n def getValidGamesList(self):\n \"\"\"Get list of all games available on steam as of knowledge\"\"\"\n\n data = requests.get('http://api.steampowered.com/ISteamApps/GetAppList/v2/').json()\n df = json_normalize(data['applist'], 'apps')\n df.to_csv('Resources/allgames.csv.gz', compression='gzip', index=False)\n\n\n#csv = CheckCSV()\n#csv.get_n_owned_games('Resources/formateddatasetMJL.csv.gz')\n#csv.removeLegacy('Resources/formateddataset10000.csv.gz')\n#df = pd.read_csv('Resources/formateddatasetMJL.csv.gz', compression='gzip')\n#csv.removeMinGames(df, minGames=5)\n#csv.getValidGamesList()\n\n" }, { "alpha_fraction": 0.670113742351532, "alphanum_fraction": 0.683557391166687, "avg_line_length": 32.344825744628906, "blob_id": "3531b072a08edf318312e841aadb3cd2fca91554", "content_id": "0e6b75b7cdc6ce590c305fb89101d48d04bef586", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 967, "license_type": "no_license", "max_line_length": 116, "num_lines": 29, "path": "/Crawl.py", "repo_name": "Tkocz/pysteam", "src_encoding": "UTF-8", "text": "from __future__ import print_function\nfrom steamwebapi.api import ISteamUser, IPlayerService, ISteamUserStats\nimport json\n\n\njson_file = open('Resources/orgkey.json', 'r')\nsteamID = json.loads(json_file.read())\njson_file.close()\n\nsteamuserinfo = ISteamUser()\nplayerserviceinfo = IPlayerService()\n\nAMOUNT = 1000\n\ncount = 0\nwhile len(steamID) < AMOUNT:\n state = steamuserinfo.get_player_summaries(steamID[count])['response']['players'][0]['communityvisibilitystate']\n if state == 3:\n friendslist = steamuserinfo.get_friends_list(steamID[count])['friendslist']['friends']\n for i in friendslist:\n if int(i['steamid']) not in steamID:\n steamID.append(int(i['steamid']))\n print('\\r{0}%'.format(round(len(steamID) / AMOUNT * 100)), end=\"\", flush=True)\n count += 1\nprint('\\n')\nprint('nUsers: ', len(steamID))\njson_file = open('Resources/steamkey{0}.json'.format(AMOUNT), 'w')\njson.dump(steamID, json_file)\njson_file.close()\n" }, { "alpha_fraction": 0.5455525517463684, "alphanum_fraction": 0.5530997514724731, "avg_line_length": 31.561403274536133, "blob_id": "68bb50047480f74bb1b010902d77f138e494c022", "content_id": "b2f0033f6b658b570782b3146d9d663dece0b1b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1855, "license_type": "no_license", "max_line_length": 81, "num_lines": 57, "path": "/GameTags.py", "repo_name": "Tkocz/pysteam", "src_encoding": "UTF-8", "text": "import json\n\nfrom bs4 import BeautifulSoup\nimport mechanize\nimport pandas as pd\nfrom tqdm import *\n\nclass GameTags():\n\n def __init__(self):\n self.mb = mechanize.Browser()\n\n def getGameTags(self, appids):\n \"\"\"Get tags of games thats available in steam store\"\"\"\n\n mb = self.mb\n mb.open(\"http://store.steampowered.com/agecheck/app/{0}/\".format(10))\n mb.select_form(nr=1)\n mb.form['ageYear'] = [\"1900\"]\n mb.submit()\n\n tag_df = pd.DataFrame()\n\n for appid in tqdm(appids):\n tag_list = 0\n mb.open('http://store.steampowered.com/app/{0}/'.format(appid))\n soup = BeautifulSoup(mb.response().read(), \"html5lib\")\n tags = soup.find('div', 'glance_tags popular_tags')\n if tags == None:\n tags = soup.find('div', 'agegate_tags')\n if tags != None:\n tag_list = [t.text.strip().encode('utf8')\n for t in tags.findAll(\"a\", {\"href\": True})]\n tag_df = tag_df.append(pd.DataFrame([[appid, tag_list]]))\n print(tag_df)\n 
tag_df.columns = ['appid', 'tags']\n tag_df = tag_df[tag_df.tags != 0]\n tag_df.to_csv('Resources/addigamegenres.csv.gz', compression='gzip')\n return(tag_df)\n\n def converting(self):\n \"\"\"Convert genres from unicode to string format\"\"\"\n\n data = pd.read_csv('Resources/addigamegenres.csv.gz', compression='gzip')\n\n tags = []\n for i, id in enumerate(tqdm(data['appid'])):\n new = list(data['tags'][i].split(','))\n for i in new:\n tags.append((id, i.strip(\"[]''\"\" \")))\n\n newdata = pd.DataFrame(tags)\n newdata.to_csv('Resources/Genresadd.csv')\n\n\n# apps = pd.read_csv('Resources/allgames.csv.gz', compression='gzip')\n# gt.getGameTags(apps['appid'])" } ]
18
Harmionee/TeamProjects
https://github.com/Harmionee/TeamProjects
c6ac70861c6e5abc581d070d108ad15df8cc6d58
272743efa8db0e51fdb8ab33858b1ec7b6e0e310
2ca67e0af383048a58f2ef8f347848c6aeccc440
refs/heads/main
2023-01-28T13:26:05.363451
2020-12-07T22:48:22
2020-12-07T22:48:22
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6397058963775635, "alphanum_fraction": 0.6617646813392639, "avg_line_length": 16, "blob_id": "95541d09325165aedd91c6a81e19d8a35cbe3eda", "content_id": "d19455b61f874484c1f2d242310786004399bfcf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 272, "license_type": "no_license", "max_line_length": 51, "num_lines": 16, "path": "/Calculator /calculator.py", "repo_name": "Harmionee/TeamProjects", "src_encoding": "UTF-8", "text": "class Calculator():\n\"\"\"\ndocstring for Calculator\n\nDescription:\nThis is the main class.\n\n\n\nRecord:\n|Sr.No.| Name | Date | Changes made |\n|1.|Ansh Sharma|8th December,2020|Creation of the class|\n\"\"\"\n\tdef __init__(self, arg):\n\t\tsuper(Calculator, self).__init__()\n\t\tself.arg = arg\n" } ]
1
cindylyl/djpractice
https://github.com/cindylyl/djpractice
aedc11919bebce15ec04c53441112431b12a0cc6
c1c6efc37b3762fbf131aabf495406b9801e4164
60e625f1a656480a915d6ea53d5ba9d68fb92a6d
refs/heads/master
2021-08-10T21:58:50.599548
2020-05-30T21:13:34
2020-05-30T21:13:34
84659675
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6853932738304138, "alphanum_fraction": 0.7003745436668396, "avg_line_length": 20.360000610351562, "blob_id": "4a0ae4aa4af012fcbfa5b48a0ac518564646a55e", "content_id": "a25ad22a5f97e496c063cce0c537452a50b820a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 534, "license_type": "no_license", "max_line_length": 94, "num_lines": 25, "path": "/README.md", "repo_name": "cindylyl/djpractice", "src_encoding": "UTF-8", "text": "# Stephanos Kungfu Club Management System\n\n> I implemented functions such as signing up, logging in and condition search in this website.\n\n## homepage\n\n![img](http://i.imgur.com/blujR1f.png)\n\n## Sign up page\n![img](http://i.imgur.com/Ho2NIED.png)\n\n## Log in page\n![img](http://i.imgur.com/VT43ELn.png)\n\n## Profile page\n![img](http://i.imgur.com/lEL53Qq.png)\n\n## Condition search\n![img](http://i.imgur.com/uJtdczn.png)\n\nresult:\n![img](http://i.imgur.com/xIgsqfw.png)\n\n## Database management page\n![img](http://i.imgur.com/58gjSsK.png)\n" }, { "alpha_fraction": 0.688927948474884, "alphanum_fraction": 0.7152900099754333, "avg_line_length": 32.47058868408203, "blob_id": "c90d5b74027ec29e71aaebde3c3fee63232e257a", "content_id": "39c661a37a066d87d3a8cf82100a316d0130cb95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3414, "license_type": "no_license", "max_line_length": 84, "num_lines": 102, "path": "/TestModel/models.py", "repo_name": "cindylyl/djpractice", "src_encoding": "UTF-8", "text": "from datetime import date, time\n\nfrom django.db import models\n\n\nclass Testing(models.Model):\n def __str__(self):\n return self.t_name\n t_name = models.CharField(max_length=100)\n t_method = models.CharField(max_length=100)\n\n\nclass Student(models.Model):\n stu_fname = models.CharField(max_length=100)\n stu_lname = models.CharField(max_length=100)\n stu_birth_date = models.DateField('birthday', default=date(1990, 1, 1))\n stu_join_date = models.DateField('join date', default=date.today)\n\n\nclass Instructor(models.Model):\n ins_id = models.AutoField(primary_key=True)\n ins_name = models.CharField(max_length=20)\n ins_birth_date = models.DateField('instructor birthday', default=date(1980,1,1))\n ins_phone = models.CharField(max_length=30)\n ins_email = models.EmailField()\n ins_address = models.CharField(max_length=100)\n\n\nclass Class(models.Model):\n class_id = models.AutoField(primary_key=True)\n class_time = models.TimeField('class time',default=time(10,0,0))\n class_dayOfTheWeek = models.CharField(max_length=20)\n class_level = models.CharField(max_length=20)\n class_location = models.CharField(max_length=20)\n ins_id = models.ForeignKey(Instructor)\n\n\nclass Meeting(models.Model):\n meeting_id = models.AutoField(primary_key=True)\n meeting_date = models.DateField(default=date.today)\n class_id = models.ForeignKey(Class)\n\n\nclass Students(models.Model):\n #user = models.ForeignKey(User, on_delete=models.CASCADE)\n stu_id = models.AutoField(primary_key=True)\n stu_fname = models.CharField(max_length=100)\n stu_lname = models.CharField(max_length=100)\n stu_birth_date = models.DateField('birthday',default=date(1990,1,1))\n stu_join_date = models.DateField('join date',default=date.today)\n stu_phone = models.CharField(max_length=30,null=True)\n stu_email = models.EmailField(null=True)\n stu_address = models.CharField(max_length=100,null=True)\n meeting_id = models.ForeignKey(Meeting,default=4001)\n 
ins_id=models.ForeignKey(Instructor,default=2001)\n\n\nclass Parent(models.Model):\n par_id = models.AutoField(primary_key=True)\n par_phone= models.CharField(max_length=30)\n par_email = models.EmailField()\n stu_id = models.ForeignKey(Students)\n\n\nclass Rank(models.Model):\n rank_id = models.AutoField(primary_key=True)\n rank_name = models.CharField(max_length=20)\n rank_belt_color = models.CharField(max_length=20)\n\n\nclass Gain(models.Model):\n stu_id = models.ForeignKey(Students,null=True)\n rank_id = models.ForeignKey(Rank,default=5001)\n gain_date = models.DateField(default=date.today)\n\n class Meta:\n unique_together = (('stu_id', 'rank_id'),)\n\n\nclass Requirement(models.Model):\n req_id = models.AutoField(primary_key= True)\n req_info = models.CharField(max_length=100)\n rank_id = models.ForeignKey(Rank)\n\n\nclass Invoice(models.Model):\n inv_id = models.AutoField(primary_key=True)\n inv_date = models.DateField(default=date.today)\n inv_info = models.CharField(max_length=100)\n stu_id = models.ForeignKey(Students)\n\n\nclass Purchasing(models.Model):\n pur_id = models.AutoField(primary_key=True)\n pur_item_name= models.CharField(max_length=30)\n inv_id = models.ForeignKey(Invoice)\n\n\nclass User(models.Model):\n username=models.CharField(max_length=100)\n password=models.CharField(max_length=100)\n stu_id = models.ForeignKey(Students,null=True)\n" }, { "alpha_fraction": 0.7166344523429871, "alphanum_fraction": 0.7166344523429871, "avg_line_length": 34.06779479980469, "blob_id": "4d25f6b1ea40b80f321adc0e8d8393cbb73d3e98", "content_id": "4d64d2a1eb9925edde51f7582b1a0d96f34a64a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2068, "license_type": "no_license", "max_line_length": 114, "num_lines": 59, "path": "/TestModel/admin.py", "repo_name": "cindylyl/djpractice", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import User,Student,Students,Invoice,Instructor,Parent,Purchasing,Rank,Requirement,Meeting,Class,Gain\n\n# Register your models here.\nclass StuAdmin(admin.ModelAdmin):\n list_display = ('id','stu_fname','stu_lname','stu_birth_date','stu_join_date')\n search_fields = ('id','stu_fname','stu_lname','stu_birth_date','stu_join_date')\n\n\nclass StusAdmin(admin.ModelAdmin):\n list_display = ('stu_id','stu_fname','stu_lname','stu_birth_date','stu_join_date','stu_phone','stu_email',\n 'meeting_id','ins_id')\n\n\nclass InsAdmin(admin.ModelAdmin):\n list_display = ('ins_id','ins_name','ins_birth_date','ins_phone','ins_email','ins_address')\n\nclass MeetingAdmin(admin.ModelAdmin):\n list_display = ('meeting_id','meeting_date','class_id')\n\n\nclass InvAdmin(admin.ModelAdmin):\n list_display = ('inv_id','inv_date','inv_info','stu_id')\n\n\nclass PurAdmin(admin.ModelAdmin):\n list_display = ('pur_id','pur_item_name','inv_id')\n\n\nclass ParAdmin(admin.ModelAdmin):\n list_display = ('par_id','par_phone','par_email','stu_id')\n\nclass RankAdmin(admin.ModelAdmin):\n list_display = ('rank_id','rank_name','rank_belt_color')\n\nclass ReqAdmin(admin.ModelAdmin):\n list_display = ('req_id','req_info','rank_id')\n\nclass ClassAdmin(admin.ModelAdmin):\n list_display = ('class_id','class_time','class_dayOfTheWeek','class_level','class_location','ins_id')\n\nclass GainAdmin(admin.ModelAdmin):\n list_display = ('stu_id','rank_id','gain_date')\n\nclass userAdmin(admin.ModelAdmin):\n list_display = ('username','password','stu_id')\n\nadmin.site.register(User, userAdmin)\nadmin.site.register(Student, 
StuAdmin)\nadmin.site.register(Students, StusAdmin)\nadmin.site.register(Instructor, InsAdmin)\nadmin.site.register(Class, ClassAdmin)\nadmin.site.register(Meeting, MeetingAdmin)\nadmin.site.register(Parent,ParAdmin)\nadmin.site.register(Gain, GainAdmin)\nadmin.site.register(Rank, RankAdmin)\nadmin.site.register(Requirement, ReqAdmin)\nadmin.site.register(Invoice, InvAdmin)\nadmin.site.register(Purchasing, PurAdmin)" }, { "alpha_fraction": 0.5180180072784424, "alphanum_fraction": 0.5900900959968567, "avg_line_length": 21.200000762939453, "blob_id": "ec6bb9baaba0b45fce968f06d9257b845c19234f", "content_id": "873ab2211a961e5ca75bf93c1a0d32c3070b4bce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 444, "license_type": "no_license", "max_line_length": 49, "num_lines": 20, "path": "/TestModel/migrations/0003_auto_20170310_2053.py", "repo_name": "cindylyl/djpractice", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.5 on 2017-03-10 20:53\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('TestModel', '0002_auto_20170310_1937'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='class',\n old_name='class_lacation',\n new_name='class_location',\n ),\n ]\n" }, { "alpha_fraction": 0.5381903648376465, "alphanum_fraction": 0.5569917559623718, "avg_line_length": 29.945453643798828, "blob_id": "4a46e59af53dd41087746be21c920a00bcfd8463", "content_id": "42deeccee37310c39cf06b05168a68183b4d5f95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1702, "license_type": "no_license", "max_line_length": 70, "num_lines": 55, "path": "/TestModel/migrations/0010_auto_20170322_0018.py", "repo_name": "cindylyl/djpractice", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.5 on 2017-03-22 00:18\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('TestModel', '0009_auto_20170321_2305'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='class',\n name='class_id',\n field=models.AutoField(primary_key=True, serialize=False),\n ),\n migrations.AlterField(\n model_name='instructor',\n name='ins_id',\n field=models.AutoField(primary_key=True, serialize=False),\n ),\n migrations.AlterField(\n model_name='invoice',\n name='inv_id',\n field=models.AutoField(primary_key=True, serialize=False),\n ),\n migrations.AlterField(\n model_name='meeting',\n name='meeting_id',\n field=models.AutoField(primary_key=True, serialize=False),\n ),\n migrations.AlterField(\n model_name='parent',\n name='par_id',\n field=models.AutoField(primary_key=True, serialize=False),\n ),\n migrations.AlterField(\n model_name='purchasing',\n name='pur_id',\n field=models.AutoField(primary_key=True, serialize=False),\n ),\n migrations.AlterField(\n model_name='rank',\n name='rank_id',\n field=models.AutoField(primary_key=True, serialize=False),\n ),\n migrations.AlterField(\n model_name='requirement',\n name='req_id',\n field=models.AutoField(primary_key=True, serialize=False),\n ),\n ]\n" }, { "alpha_fraction": 0.6705882549285889, "alphanum_fraction": 0.686274528503418, "avg_line_length": 20.25, "blob_id": "ea7e597a64e1c6cdd4de1c0ea36c1cc9c9de7b09", "content_id": "9a3a7c15a02c7b6abcd636867cc3fbea69999850", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 255, "license_type": "no_license", "max_line_length": 55, "num_lines": 12, "path": "/TestModel/testdb.py", "repo_name": "cindylyl/djpractice", "src_encoding": "UTF-8", "text": "from django.http import HttpResponse\nfrom .models import Testing\n\n\ndef testdb(request):\n test1 = Testing(t_name='hahahah')\n test2 = Testing(t_method='oooo')\n\n test1.save()\n test2.save()\n\n return HttpResponse(\"<p>add data successfully</p>\")\n" }, { "alpha_fraction": 0.6641221642494202, "alphanum_fraction": 0.6641221642494202, "avg_line_length": 21, "blob_id": "bf2c7a8e45ae1b1e774099e0652f41259cef1cd8", "content_id": "1f2bd475b02865ad784f85ea1a15ec2b05418abf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 131, "license_type": "no_license", "max_line_length": 53, "num_lines": 6, "path": "/TestModel/urls.py", "repo_name": "cindylyl/djpractice", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom .views import profile\n\nurlpatterns =[\n url(r'^(?P<stu_id>\\d+)', profile, name='stu_id'),\n]" }, { "alpha_fraction": 0.5331302285194397, "alphanum_fraction": 0.5492764711380005, "avg_line_length": 43.96575164794922, "blob_id": "442a92703e515fc237129008e9a91cd90d0f2a6c", "content_id": "c28ae1f018846726dc7aa67899bc9a852c8002e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6565, "license_type": "no_license", "max_line_length": 133, "num_lines": 146, "path": "/TestModel/migrations/0002_auto_20170310_1937.py", "repo_name": "cindylyl/djpractice", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.5 on 2017-03-10 19:37\nfrom __future__ import unicode_literals\n\nimport datetime\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('TestModel', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Class',\n fields=[\n ('class_id', models.IntegerField(primary_key=True, serialize=False)),\n ('class_time', models.TimeField(default=datetime.time(10, 0), verbose_name='class time')),\n ('class_dayOfTheWeek', models.IntegerField()),\n ('class_level', models.CharField(max_length=20)),\n ('class_lacation', models.CharField(max_length=20)),\n ],\n ),\n migrations.CreateModel(\n name='Gain',\n fields=[\n ('stu_id', models.IntegerField(primary_key=True, serialize=False)),\n ('rank_id', models.IntegerField()),\n ('gain_date', models.DateField(default=datetime.date.today)),\n ],\n ),\n migrations.CreateModel(\n name='Instructor',\n fields=[\n ('ins_id', models.IntegerField(primary_key=True, serialize=False)),\n ('ins_name', models.CharField(max_length=20)),\n ('ins_birth_date', models.DateField(default=datetime.date(1980, 1, 1), verbose_name='instructor birthday')),\n ('ins_phone', models.CharField(max_length=30)),\n ('ins_email', models.EmailField(max_length=254)),\n ('ins_address', models.CharField(max_length=100)),\n ],\n ),\n migrations.CreateModel(\n name='Invoice',\n fields=[\n ('inv_id', models.IntegerField(primary_key=True, serialize=False)),\n ('inv_date', models.DateField(default=datetime.date.today)),\n ('inv_info', models.CharField(max_length=100)),\n ],\n ),\n migrations.CreateModel(\n name='Meeting',\n fields=[\n ('meeting_id', models.IntegerField(primary_key=True, serialize=False)),\n ('meeting_date', models.DateField(default=datetime.date.today)),\n ('class_id', 
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TestModel.Class')),\n ],\n ),\n migrations.CreateModel(\n name='Parent',\n fields=[\n ('par_id', models.IntegerField(primary_key=True, serialize=False)),\n ('par_phone', models.CharField(max_length=30)),\n ('par_email', models.EmailField(max_length=254)),\n ],\n ),\n migrations.CreateModel(\n name='Purchasing',\n fields=[\n ('pur_id', models.IntegerField(primary_key=True, serialize=False)),\n ('pur_item_name', models.CharField(max_length=30)),\n ('inv_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TestModel.Invoice')),\n ],\n ),\n migrations.CreateModel(\n name='Rank',\n fields=[\n ('rank_id', models.IntegerField(primary_key=True, serialize=False)),\n ('rank_name', models.CharField(max_length=20)),\n ('rank_belt_color', models.CharField(max_length=20)),\n ],\n ),\n migrations.CreateModel(\n name='Requirement',\n fields=[\n ('req_id', models.IntegerField(primary_key=True, serialize=False)),\n ('req_info', models.CharField(max_length=100)),\n ('rank_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TestModel.Rank')),\n ],\n ),\n migrations.CreateModel(\n name='Student',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('stu_fname', models.CharField(max_length=100)),\n ('stu_lname', models.CharField(max_length=100)),\n ('stu_birth_date', models.DateField(default=datetime.date(1990, 1, 1), verbose_name='birthday')),\n ('stu_join_date', models.DateField(default=datetime.date.today, verbose_name='join date')),\n ],\n ),\n migrations.CreateModel(\n name='Students',\n fields=[\n ('stu_id', models.IntegerField(primary_key=True, serialize=False)),\n ('stu_fname', models.CharField(max_length=100)),\n ('stu_lname', models.CharField(max_length=100)),\n ('stu_birth_date', models.DateField(default=datetime.date(1990, 1, 1), verbose_name='birthday')),\n ('stu_join_date', models.DateField(default=datetime.date.today, verbose_name='join date')),\n ('stu_phone', models.CharField(max_length=30, null=True)),\n ('stu_email', models.EmailField(max_length=254, null=True)),\n ('stu_address', models.CharField(max_length=100, null=True)),\n ('ins_id', models.ForeignKey(default=2001, on_delete=django.db.models.deletion.CASCADE, to='TestModel.Instructor')),\n ('meeting_id', models.ForeignKey(default=3001, on_delete=django.db.models.deletion.CASCADE, to='TestModel.Meeting')),\n ],\n ),\n migrations.CreateModel(\n name='User',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('username', models.CharField(max_length=100)),\n ('password', models.CharField(max_length=100)),\n ],\n ),\n migrations.AddField(\n model_name='parent',\n name='stu_id',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TestModel.Student'),\n ),\n migrations.AddField(\n model_name='invoice',\n name='stu_id',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TestModel.Student'),\n ),\n migrations.AlterUniqueTogether(\n name='gain',\n unique_together=set([('stu_id', 'rank_id')]),\n ),\n migrations.AddField(\n model_name='class',\n name='ins_id',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TestModel.Instructor'),\n ),\n ]\n" }, { "alpha_fraction": 0.6886967420578003, "alphanum_fraction": 0.693638026714325, "avg_line_length": 37.5476188659668, "blob_id": "c9b4bc62443cc540bfbac9777f19b07bd6c65341", "content_id": 
"e6bb372f980a3ed78f4fbee15a4c8e0e4c9c4de4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1619, "license_type": "no_license", "max_line_length": 82, "num_lines": 42, "path": "/djpractice/urls.py", "repo_name": "cindylyl/djpractice", "src_encoding": "UTF-8", "text": "\"\"\"djpractice URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom rest_framework import routers\n\n# from djpractice.view import hello\nfrom TestModel import views\nfrom TestModel.testdb import testdb\n\nrouter = routers.DefaultRouter()\nrouter.register(r'users', views.UserViewSet)\n# router.register(r'groups', views.GroupViewSet)\nrouter.register(r'students',views.StudentViewSet)\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^hello/$', views.hello),\n url(r'^testdb/$', testdb),\n #url(r'^profile/(?P<stu_id>\\d+)/$', profile,name='profile'),\n url(r'^profile/', include('TestModel.urls',namespace='profile')),\n url(r'^edit/', views.edit_profile),\n url(r'^login/', views.login, name='login'),\n url(r'^signup/', views.signup),\n url(r'^report/', views.report),\n url(r'^report_filter/',views.report_filter),\n url(r'^', include(router.urls)),\n url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))\n]\n" }, { "alpha_fraction": 0.6858552694320679, "alphanum_fraction": 0.6858552694320679, "avg_line_length": 26.636363983154297, "blob_id": "fe6fa70e055155d0ef69a4e2350ca985d9cdf83f", "content_id": "0a88d038aa44720540ca78c0283abfd1f0b18534", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 608, "license_type": "no_license", "max_line_length": 84, "num_lines": 22, "path": "/TestModel/serializers.py", "repo_name": "cindylyl/djpractice", "src_encoding": "UTF-8", "text": "from django.contrib.auth.models import User\nfrom rest_framework import serializers\n\nfrom .models import Students\n\n\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = User\n fields = ('url', 'username', 'email', 'groups')\n\n#\n# class GroupSerializer(serializers.HyperlinkedModelSerializer):\n# class Meta:\n# model = Group\n# fields = ('url', 'name')\n\n\nclass StudentSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Students\n fields = ('stu_id','stu_fname','stu_lname','stu_birth_date','stu_join_date')\n" }, { "alpha_fraction": 0.5999095439910889, "alphanum_fraction": 0.6036796569824219, "avg_line_length": 34.271278381347656, "blob_id": "180065ee9d83ae2470c6a8f1767442ec77368821", "content_id": "5462f6d89b93d239253ead06b10395c76b5199e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6631, "license_type": "no_license", "max_line_length": 111, "num_lines": 188, "path": "/TestModel/views.py", "repo_name": "cindylyl/djpractice", "src_encoding": "UTF-8", 
"text": "from datetime import date\n\nfrom django.contrib import messages\nfrom django.db.models import Q\nfrom django.shortcuts import render, render_to_response, get_object_or_404, redirect\n# from django.contrib.auth.models import User, Group\nfrom rest_framework import viewsets\n\nfrom .models import Students, Gain, Rank, Invoice, User\nfrom .serializers import UserSerializer, StudentSerializer\n\n\n# Create your views here.\n\ndef hello(request):\n ctx = {}\n if request.POST:\n ctx['rlt'] = request.POST['q']\n return render(request, \"hello.html\", ctx)\n #return render(request, 'hello.html', context)\n\n\ndef profile(request,stu_id):\n stu = get_object_or_404(Students, pk=stu_id)\n rank = Rank.objects.filter(gain__stu_id=stu)\n print(rank)\n return render(request, \"profile.html\", {'stu': stu, 'rank': rank})\n\n\ndef edit_profile(request):\n return render(request, \"edit.html\")\n\n\ndef signup(request):\n ctx={}\n if request.POST:\n # get input value\n username = request.POST['username']\n password = request.POST['password']\n firstname = request.POST['fname']\n lastname = request.POST['lname']\n birth_y = int(request.POST['birthday_y'])\n birth_m = int(request.POST['birthday_m'])\n birth_d = int(request.POST['birthday_d'])\n # joindate_y = int(request.POST['joindate_y'])\n # joindate_m = int(request.POST['joindate_m'])\n # joindate_d = int(request.POST['joindate_d'])\n phone = request.POST['phone']\n email = request.POST['email']\n address = request.POST['address']\n\n # insert input to database\n stu = Students(stu_fname=firstname, stu_lname=lastname, stu_birth_date=date(birth_y, birth_m, birth_d),\n stu_join_date=date.today(), stu_phone=phone, stu_email=email,\n stu_address=address)\n stu.save()\n print(stu.stu_id)\n user = User(username=username,password=password,stu_id=stu)\n user.save()\n gain = Gain(stu_id=stu,gain_date=date.today(),rank_id=Rank.objects.get(rank_id=5001))\n gain.save()\n\n # return message\n messages.add_message(request, messages.INFO, \"sign up successfully, please log in...\")\n ctx['rlt']=\"sign up successfully\"\n return redirect('/login/')\n return render(request, 'signup.html',ctx)\n\n\ndef login(request):\n message = {}\n storage = messages.get_messages(request)\n\n for m in storage:\n message['signin_success'] = m\n break\n\n if request.POST:\n username = request.POST['username']\n password = request.POST['password']\n #usr = Students.objects.filter(user__password=password,user__username=username)\n usr = User.objects.filter(username=username,password=password)\n\n if len(usr) == 1:\n stu_id = usr.values()[0]['stu_id_id']\n #print(stu_id)\n #message['id']=stu_id\n #return redirect(reverse('profile',args=(stu_id,)))\n return redirect('/profile/{}'.format(stu_id))\n #return HttpResponseRedirect(reverse('profile',args=(stu_id,)))\n\n else:\n message['error_message']=\"incorrect username or password\"\n\n return render(request, 'login.html', message)\n\n\ndef report_filter(request):\n return render_to_response('report_filter.html')\n\n\ndef report(request):\n request.encoding = 'utf-8'\n\n # student_list = Student.objects.all().order_by('stu_join_date')\n # for k in request.GET:\n # message = request.GET[k]\n # #print(message)\n # if message is not '':\n # if k =='stu_fname':\n # student_list = student_list.filter(stu_fname=message)\n # elif k=='stu_join_date':\n # student_list = student_list.filter(stu_join_date__gt=message)\n # context ={\n # 'student_list': student_list\n # }\n\n #result_list={}\n active_students = Students.objects.all()\n 
student_list = Students.objects.all()\n invoice_list = Invoice.objects.all()\n gain_list = Gain.objects.all()\n\n belt_color =[]\n condition = []\n\n for k in request.GET:\n message = request.GET[k]\n if message is not '':\n if k == 'stu_fname':\n student_list = student_list.filter(stu_fname=message)\n condition.append('Student First Name: '+message)\n elif k =='stu_lname':\n student_list = student_list.filter(stu_lname=message)\n condition.append('Student Last Name: ' + message)\n elif k == 'join_date1':\n student_list = student_list.filter(stu_join_date__gte=message)\n elif k == 'join_date2':\n student_list = student_list.filter(stu_join_date__lte=message)\n elif k == 'belt_color':\n belt_color.append(message)\n elif k == 'day_of_the_week':\n student_list = student_list.filter(meeting_id__class_id__class_dayOfTheWeek=message)\n elif k == 'inv_date1':\n invoice_list = invoice_list.filter(inv_date__gte=message)\n elif k== 'inv_date2':\n invoice_list = invoice_list.filter(inv_date__lte=message)\n elif k== 'inv_info':\n invoice_list = invoice_list.filter(inv_info=message)\n if len(belt_color)==1:\n student_list = student_list.filter(gain__rank_id__rank_belt_color=belt_color[0])\n gain_list = gain_list.filter(rank_id__rank_belt_color=belt_color[0])\n elif len(belt_color)==2:\n student_list = student_list.filter(Q(gain__rank_id__rank_belt_color=belt_color[0]) |\n Q(gain__rank_id__rank_belt_color=belt_color[1]))\n gain_list = gain_list.filter(Q(rank_id__rank_belt_color=belt_color[0]) |\n Q(rank_id__rank_belt_color=belt_color[1]))\n\n\n\n context = {\n 'student_list':student_list,\n 'active_students': active_students,\n 'invoice_list': invoice_list,\n 'gain_list': gain_list\n }\n return render(request,'report.html',context)\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows users to be viewed or edited.\n \"\"\"\n queryset = User.objects.all().order_by('-date_joined')\n serializer_class = UserSerializer\n#\n#\n# class GroupViewSet(viewsets.ModelViewSet):\n# \"\"\"\n# API endpoint that allows groups to be viewed or edited.\n# \"\"\"\n# queryset = Group.objects.all()\n# serializer_class = GroupSerializer\n\n\nclass StudentViewSet(viewsets.ModelViewSet):\n queryset = Students.objects.all()\n serializer_class = StudentSerializer\n" }, { "alpha_fraction": 0.5787937641143799, "alphanum_fraction": 0.6021400690078735, "avg_line_length": 33.266666412353516, "blob_id": "58cae3aa9590c185a87e71df61163f8cb112e582", "content_id": "9784bab37b5ae77ae4ef885d999ef2c271da117d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1028, "license_type": "no_license", "max_line_length": 127, "num_lines": 30, "path": "/TestModel/migrations/0007_auto_20170311_0130.py", "repo_name": "cindylyl/djpractice", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.5 on 2017-03-11 01:30\nfrom __future__ import unicode_literals\n\nimport datetime\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('TestModel', '0006_delete_gain'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Gain',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('gain_date', models.DateField(default=datetime.date.today)),\n ('rank_id', models.ForeignKey(default=5001, on_delete=django.db.models.deletion.CASCADE, to='TestModel.Rank')),\n ('stu_id', models.ForeignKey(null=True, 
on_delete=django.db.models.deletion.CASCADE, to='TestModel.Students')),\n ],\n ),\n migrations.AlterUniqueTogether(\n name='gain',\n unique_together=set([('stu_id', 'rank_id')]),\n ),\n ]\n" }, { "alpha_fraction": 0.574999988079071, "alphanum_fraction": 0.6392857432365417, "avg_line_length": 25.66666603088379, "blob_id": "7780b982655b24c3135fe3112a28ee63d59705c8", "content_id": "2b3a41a5dba7093ec8629d28162ba0e3e3448580", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 560, "license_type": "no_license", "max_line_length": 119, "num_lines": 21, "path": "/TestModel/migrations/0011_auto_20170322_0320.py", "repo_name": "cindylyl/djpractice", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.5 on 2017-03-22 03:20\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('TestModel', '0010_auto_20170322_0018'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='students',\n name='meeting_id',\n field=models.ForeignKey(default=4001, on_delete=django.db.models.deletion.CASCADE, to='TestModel.Meeting'),\n ),\n ]\n" } ]
13
madan554/madhan1
https://github.com/madan554/madhan1
a79c1c7e77ac3be3071ff9db7c3a91444ba4c61f
520f2df427932912e1d82ce0805f50e0d97bbb3d
e7fcee8283f7f51eec1c94c4fd993c094aae7ed7
refs/heads/master
2023-04-07T12:19:34.659080
2021-04-04T11:03:11
2021-04-04T11:03:11
353,239,064
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7004680037498474, "alphanum_fraction": 0.7004680037498474, "avg_line_length": 23.653846740722656, "blob_id": "4ecc3ee7966507541ec16bf3586fd2217db8c49c", "content_id": "f71fbfdacb09668085f2898e44493134fe5ad7e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 641, "license_type": "no_license", "max_line_length": 56, "num_lines": 26, "path": "/Day-10/Emp/views.py", "repo_name": "madan554/madhan1", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\n\n# Create your views here.\n\ndef home(request):\n\treturn render(request,'html/home.html')\n\ndef about(request):\n\treturn render(request,'html/about.html')\n\ndef contact(request):\n\treturn render(request,'html/contact.html')\n\ndef login(request):\n\treturn render(request,'html/login.html')\n\ndef register(request):\n\tif request.method == \"POST\":\n\t\tn=request.POST['uname']\n\t\tct=request.POST['cn']\n\t\tadd=request.POST['ad']\n\t\te=request.POST['eml']\n\t\ta=request.POST['ag']\n\t\td={'un':n,'con':ct,'as':add,'em':e,'age':a}\n\t\treturn render (request,'html/details.html',{'hari':d})\n\treturn render(request,'html/register.html')\n" } ]
1
sadeghieh/PersianNLP
https://github.com/sadeghieh/PersianNLP
0d4e14ec9e66299c3fa44a02a7a45d07e5c423d6
8479a11f848b3e9cff55e5feff8ee5346e1eb36e
15bda869b70a17723cad2b16dd9dfbe8876cf5ac
refs/heads/master
2020-04-04T20:39:22.323766
2018-12-02T04:29:41
2018-12-02T04:29:41
156,255,023
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5044902563095093, "alphanum_fraction": 0.5273375511169434, "avg_line_length": 40.37704849243164, "blob_id": "03613c3e69804ccd7048b747af87a79c221f412b", "content_id": "ab053f32497573d8c7de0e771147d47af8596e09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7975, "license_type": "no_license", "max_line_length": 118, "num_lines": 183, "path": "/normalizer.py", "repo_name": "sadeghieh/PersianNLP", "src_encoding": "UTF-8", "text": "import re\n\n\nclass Normalizer:\n\n supported_languages = ['fa', 'en']\n\n def __init__(self, language: str = 'fa'):\n if self.is_supported(language):\n self.locale = language\n else:\n # revert to the default locale\n self.locale = 'fa'\n\n punct_range = self.get_unicode_range('punctuation')\n alphabet_range = self.get_unicode_range('alphabet')\n numeral_range = self.get_unicode_range('numerals')\n self.punctuation = punct_range[self.locale]\n self.alphabet = alphabet_range[self.locale]\n self.digits = numeral_range[self.locale]\n\n @staticmethod\n def get_unicode_range(boundary: str = None) -> dict:\n arabic_w = u'\\u0621-\\u063A\\u0641-\\u064A' # ARABIC ALPHABET\n arabic_w += u'\\u067E\\u0686\\u0698\\u06A9\\u06AF\\u06CC\\u0654' # EXTENDED ARABIC LETTERS\n\n # preserve order where mapping across languages may be applicable, e.g. numbers or punctuation marks\n ranges = \\\n {'punctuation': {'fa': r'\"\\/\\؟!٪()،؛:.', 'en': r'\"\\/\\?!%(),;:.'},\n 'numerals': {'fa': '۰۱۲۳۴۵۶۷۸۹', 'en': '0123456789', 'ar': '٠١٢٣٤٥٦٧٨٩'},\n 'alphabet': {'fa': arabic_w, 'en': r'[a-zA-Z]'},\n 'diacritics': {'fa': u'\\u0610-\\u061A\\u064B-\\u065F'}}\n\n return ranges[boundary] if boundary else ranges\n\n def get_punctuation(self) -> str:\n return self.punctuation\n\n def get_numerals(self) -> str:\n return self.digits\n\n def get_alphabet(self) -> str:\n return self.alphabet\n\n def is_supported(self, lang: str) -> bool:\n return True if lang in self.supported_languages else False\n\n def trim_whitespace(self, text: str) -> str:\n return re.sub('(\\s){2,}', '\\g<1>', text)\n\n def filter_xml_tags(self, text: str) -> str:\n return re.sub(r'<.*?>', '', text)\n\n def filter_url(self, text: str, rep: str = ''):\n return re.sub(r'https?:\\/\\/\\S+\\/?', rep, text)\n\n def localize_punc(self, text: str, sep: str = ' ') -> str:\n out_charset = self.get_punctuation()\n punct_marks = self.get_unicode_range('punctuation')\n # a list of punctuation marks not used by the current locale\n in_charsets = [punct_marks[lang] for lang in punct_marks if lang != self.locale]\n\n for i in range(len(in_charsets)):\n tbl = str.maketrans(in_charsets[i], out_charset)\n text = text.translate(tbl)\n\n if sep:\n text = re.sub('(?<!'+ sep +')([' + out_charset + ']){1,3}', sep + '\\g<0>', text)\n text = re.sub('([' + out_charset + ']){1,3}(?!'+ sep +')', '\\g<0>' + sep, text)\n\n return text\n\n def localize_digits(self, text: str) -> str:\n out_charset = self.get_numerals()\n numerals = self.get_unicode_range('numerals')\n\n # a list of digit characters not used by the current locale\n in_charsets = [numerals[lang] for lang in numerals if lang != self.locale]\n\n for i in range(len(in_charsets)):\n tbl = str.maketrans(in_charsets[i], out_charset)\n text = text.translate(tbl)\n\n return text\n\n\nclass Persianizer(Normalizer):\n\n def localize_punc(self, text: str, sep: str = ' '):\n tbl = str.maketrans('«»', '\"\"')\n text = text.translate(tbl)\n return super(Persianizer, self).localize_punc(text)\n\n @staticmethod\n def 
get_affixes(affix_type: str = 'all') -> dict:\n affixes = \\\n {'suffix_INFL': ['های*', 'ها ای', 'ای+', 'ی+', 'مان', 'تان', 'شان', 'تری?', 'ترین',\n 'هایم', 'هایت', 'هایش', 'هایمان', 'هایتان', 'هایشان', 'ام', 'ات', 'اش'],\n 'suffix_LEX': ['ای', 'اید', 'ام', 'ایم', 'اند', 'جاتی?', 'آوری?', 'نشینی?', 'کننده', 'کنندگی', 'کنندگان',\n 'پاشی?', 'پوشی?', 'پوشانی?', 'شناسی?', 'شناسانی?', 'پذیری?', 'پذیرانی?', 'ناپذیری?',\n 'شکنی?', 'شکنانی?', 'فشانی?', 'سازی?', 'آلودی?', 'آمیزی?', 'زدای*', 'خوردگی', 'زدگی',\n 'انگیزی?', 'خیزی?', 'سوزی?', 'پراکنی', 'خوری', 'افکنی?', 'دانی?', 'گرفتگی', 'المللی?',\n 'پروری?', 'پریشی?', 'نویسی?', 'وار', 'واره', 'کارانی?', 'پژوهی?', 'سنجی?', 'بانی?',\n 'کنان', 'پردازی?', 'رسانی?', 'یابی?', 'پیما', 'گری?', 'گیری?', 'مندی?', 'ساعته',\n 'ور', 'اندازی?', 'مندی?', 'مندانی?'],\n 'prefix_INFL': ['ن?می'],\n 'prefix_LEX': ['نا', 'بی', 'فرا', 'سوء', 'غیر'],\n 'circum_LEX': ['هم\\s\\S+?ی']}\n\n if affix_type != 'all':\n return {key: value for key, value in affixes.items() if key.startswith(affix_type)}\n else:\n return affixes\n\n def normalize_affixation(self, text: str, affix_type: str = 'all', p_sep: str = '', s_sep: str = '') -> str:\n affix_type = [affix_type]\n types = affix_type if affix_type[0] != 'all' else ['prefix', 'suffix']\n affixes = {}\n for t in types:\n affixes.update(self.get_affixes(t))\n\n for affix_type, affix_list in affixes.items():\n if affix_type.startswith('prefix'):\n for prefix in affix_list:\n pattern = r'(?:^|(?<=\\W))({})\\W+'\n pattern = pattern.format(prefix)\n text = re.sub(pattern, '\\g<1>' + p_sep, text)\n elif affix_type.startswith('suffix'):\n for suffix in affix_list:\n pattern = r'\\W+({})(?=\\W)'\n pattern = pattern.format(suffix)\n text = re.sub(pattern, s_sep + '\\g<1>', text)\n elif affix_type.startswith('circum'):\n for circum in affix_list:\n old_words = re.findall(circum, text)\n for word in old_words:\n text = text.replace(word, re.sub(r'\\s', '', word))\n\n return text\n\n def filter_zwnj(self, text: str, replace: str = '') -> str:\n return text.replace('\\u200c', replace)\n\n def filter_diacritics(self, text: str) -> str:\n diacritics = self.get_unicode_range('diacritics')\n return re.sub('[{}]'.format(diacritics[self.locale]), '', text)\n\n def filter_foreign(self, text: str) -> str:\n # TODO: use class internal methods and attributes\n arabic = '\\s'\n arabic += '\\u060C\\u061B\\u061F\\u06D4' # ARABIC COMMA SEMICOLON QUESTION FULLSTOP\n arabic += '\\u064B-\\u0652' # ARABIC DIACRITICS\n arabic += '\\u0621-\\u063A\\u0641-\\u064A' # ARABIC ALPHABET\n arabic += '\\u067E\\u0686\\u0698\\u06A9\\u06AF\\u06CC\\u0654'\n arabic += '\\u0660-\\u0669\\u06F0-\\u06F9' # ARABIC DIGITS\n arabic += '\\u0640' # ARABIC TATWEEL\n\n non_arabic = '[^' + arabic + ']'\n return re.sub(non_arabic, '', text)\n\n def trim_whitespace(self, text: str) -> str:\n return super(Persianizer, self).trim_whitespace(text)\n\n def filter_tatvil(self, text: str) -> str:\n tatweel = '\\u0640'\n return text.replace(tatweel, '')\n\n def filter_yah_ezafe(self, text: str) -> str:\n pass\n\n def normalize_hamza(self, text: str) -> str:\n text = re.sub(r'(?<=['+'آاوی'+'])'+'ء'+'(?=[\\s])', '', text)\n text = re.sub(r'(?<![\\s\\u200C])' + 'آ', 'ا', text)\n\n mapping = str.maketrans('إأئؤ', 'اایو')\n return text.translate(mapping)\n\n def normalize_arabic_letters(self, text: str) -> str:\n mapping = str.maketrans('يكة', 'یکه')\n return text.translate(mapping)\n\n def filter_nonsense(self, text: str, preserve: str = '') -> str:\n pass\n" }, { "alpha_fraction": 
0.5687984228134155, "alphanum_fraction": 0.582848846912384, "avg_line_length": 56.33333206176758, "blob_id": "50bcc488a5578278478bfe656a74b5ce07b4946c", "content_id": "40ffd4cae94fb046c7b5a84418109ffde0e4c362", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2072, "license_type": "no_license", "max_line_length": 118, "num_lines": 36, "path": "/main.py", "repo_name": "sadeghieh/PersianNLP", "src_encoding": "UTF-8", "text": "import os, re\nfrom normalizer import Persianizer\n\nif __name__ == '__main__':\n SOURCE_DATA_PATH = './data/'\n OUTPUT_DATA_PATH = './out/'\n if not os.path.exists(OUTPUT_DATA_PATH):\n os.mkdir(OUTPUT_DATA_PATH)\n\n p = Persianizer()\n for file in os.listdir(SOURCE_DATA_PATH):\n filename = file.split('.')\n \"\"\" Only open document files, ignores OSX system dot files/folders \"\"\"\n if filename[-1] in ['txt', 'xml', 'html']:\n with open(SOURCE_DATA_PATH + file, 'rt', encoding='utf-8') as f:\n raw = f.read()\n print('Processing file {} ... '.format(file), end='')\n text = p.filter_xml_tags(raw) # Remove HTML/XML tags\n text = re.sub(r'\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}Z', '', text) ## Remove Timestamp\n text = re.sub(r'([\"*]){2,}', '\\g<1>', text) # Remove duplicate special characters\n text = p.filter_url(text) # Remove URLs\n text = p.trim_whitespace(text) # Remove extra whitespace\n text = p.filter_diacritics(text) # Remove diacritics\n text = p.filter_tatvil(text) # Remove ARABIC/PERSIAN TATVIL (-) character\n text = p.normalize_arabic_letters(text) # Replace Arabic 'يكة' code-points\n text = p.localize_digits(text) # Replace Non-Persian digit code-points\n text = p.normalize_hamza(text) # Normalize إأئؤ and ء\n text = p.localize_punc(text) # Normalize punctuation marks, optionally wrap with space\n text = p.filter_zwnj(text, ' ') # Remove or Replace zwnj with any other character (space in this case)\n text = p.normalize_affixation(text) # Normalize the use of inflectional and lexical suffixes and prefixes\n print('{}Done{}'.format('\\033[92m\\033[1m', '\\033[0m\\033[0m'))\n\n new_filename = ''.join(filename[:-1]) + '.txt'\n with open(OUTPUT_DATA_PATH + new_filename, 'wt', encoding='utf-8') as f:\n print('Saving normalized file as {}'.format(new_filename))\n f.write(text)\n" } ]
2
mloumeau/Discrete-Mathematics
https://github.com/mloumeau/Discrete-Mathematics
9781e2b31c53fad91a805e2f9c99c5f57a95b544
edc1c68da5569d719d07be1af02873fe83da3e06
e19cbc5a577a36558ffc362680506b0a1bd1cc69
refs/heads/main
2023-02-10T16:08:19.153121
2020-12-31T18:46:36
2020-12-31T18:46:36
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5063678026199341, "alphanum_fraction": 0.5150279998779297, "avg_line_length": 21.369047164916992, "blob_id": "089558390927cee59ecf09b43c0ce1ac042cc21b", "content_id": "2d35bbf06e2482932b5be66581ffbcad69ffb206", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1963, "license_type": "no_license", "max_line_length": 54, "num_lines": 84, "path": "/PPP10.py", "repo_name": "mloumeau/Discrete-Mathematics", "src_encoding": "UTF-8", "text": "import random\r\n\r\ngraph = {'A': ['B', 'C', 'D'],\r\n 'B': ['A', 'C'],\r\n 'C': ['A', 'B', 'E'],\r\n 'D': ['A', 'F'],\r\n 'E': ['F', 'C'],\r\n 'F': ['D', 'E']}\r\n\r\nladderGraph = {'A': ['B', 'C'],\r\n 'B': ['A', 'D'],\r\n 'C': ['A', 'D', 'E'],\r\n 'D': ['B', 'C', 'F'],\r\n 'E': ['C', 'F'],\r\n 'F': ['D', 'E']}\r\n\r\nbullGraph = {'A': ['B'],\r\n 'B': ['C', 'D'],\r\n 'C': ['B', 'D'],\r\n 'D': ['B', 'C', 'E'],\r\n 'E': ['D']}\r\n\r\n\r\ndef getNodes(graphChoice):\r\n nodes = list(graphChoice.keys())\r\n return nodes\r\n\r\ndef getUnvisited(nodes):\r\n return nodes[1:]\r\n\r\ndef getVisited(nodes):\r\n return [nodes[0]]\r\n\r\ndef createPotential():\r\n for node in visited:\r\n #Remove hardcode\r\n for adjacency in bullGraph[node]:\r\n if adjacency in unvisited:\r\n return (node, adjacency)\r\n return 0\r\n\r\ndef createSpanningTree(keyword):\r\n potential = createPotential() \r\n if not potential:\r\n return\r\n \r\n visited.append(potential[1])\r\n unvisited.remove(potential[1]) \r\n return potential\r\n\r\n\r\nnodesBull = getNodes(bullGraph)\r\nunvisited = getUnvisited(nodesBull)\r\n\r\nrandom.shuffle(nodesBull)\r\n\r\nvisited = getVisited(nodesBull)\r\n\r\nanswer1 = list(map(createSpanningTree, nodesBull))\r\nanswer1 = [x for x in answer1 if x != None]\r\n\r\n# nodes = getNodes(graph)\r\n# unvisited = getUnvisited(nodes)\r\n\r\n# random.shuffle(nodes)\r\n\r\n# visited = getVisited(nodes)\r\n\r\n# answer2 = list(map(createSpanningTree, nodes))\r\n# answer2[:] = [x for x in answer2 if x != None]\r\n\r\n# nodesLadder = getNodes(ladderGraph)\r\n# unvisited = getUnvisited(nodesLadder)\r\n\r\n# random.shuffle(nodesLadder)\r\n\r\n# visited = getVisited(nodesLadder)\r\n\r\n# answer3 = list(map(createSpanningTree, nodesLadder))\r\n# answer3[:] = [x for x in answer3 if x != None]\r\n\r\nprint(answer1)\r\n# print(answer2)\r\n# print(answer3)\r\n" }, { "alpha_fraction": 0.5657051205635071, "alphanum_fraction": 0.5811966061592102, "avg_line_length": 25.130434036254883, "blob_id": "f6bcdc793756d4fb019d6378b0f46239d673674a", "content_id": "27054c7869c7de5d3ad721fe2a8ec3532d481005", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1872, "license_type": "no_license", "max_line_length": 126, "num_lines": 69, "path": "/PPP11.py", "repo_name": "mloumeau/Discrete-Mathematics", "src_encoding": "UTF-8", "text": "from graph_test_dict import graph_test_dict\r\n\r\ndef getEdgeListFromFile(fileName):\r\n edgeList = []\r\n try:\r\n file = open(fileName)\r\n \r\n for x in file:\r\n edgeList.append(tuple(map(int, x.split(\" \"))))\r\n\r\n file.close()\r\n except:\r\n print(\"ERROR: Issue reading file named\", fileName)\r\n return None\r\n return edgeList\r\n\r\ndef buildGraphFromEdges(edgeList):\r\n graph = [[], []]\r\n for x in edgeList:\r\n if not (x[0] in graph[0]):\r\n graph[0].append(x[0])\r\n if not (x[1] in graph[0]):\r\n graph[0].append(x[1])\r\n graph[1].append(x)\r\n\r\n return graph\r\n\r\ndef getGraphFromFile(fileName):\r\n edgeList = 
getEdgeListFromFile(fileName)\r\n\r\n if edgeList == None:\r\n return None\r\n return buildGraphFromEdges(edgeList)\r\n\r\ndef get_adjacency_list(graph):\r\n adjList = {}\r\n\r\n for x in graph[0]:\r\n adjList[x] = []\r\n \r\n for x in graph[1]:\r\n adjList[x[0]].append(x[1])\r\n adjList[x[1]].append(x[0])\r\n\r\n return adjList\r\n\r\ndef link_exists(adjList, node1, node2):\r\n return (node1 in adjList[node2]) or (node2 in adjList[node1])\r\n\r\n\r\ndef check_clique_or_anti_clique(graph, adjList, nodes, anti):\r\n for x in range(0, len(nodes) - 1):\r\n for y in range(x + 1, len(nodes)):\r\n if link_exists(adjList, nodes[x], nodes[y]) == anti:\r\n return False\r\n return True\r\n\r\ndef testFunctions(graph, adjList, nodes):\r\n print(check_clique_or_anti_clique(graph, adjList, nodes, False), check_clique_or_anti_clique(graph, adjList, nodes, True))\r\n\r\n\r\ngraphs=[]\r\nadjList=[]\r\nfor i in range(6):\r\n graphs.append(getGraphFromFile(f\"graph{i+1}.in\"))\r\n adjList.append(get_adjacency_list(graphs[i]))\r\n print(f\"GRAPH {i+1}:\")\r\n for y in range(4):\r\n testFunctions(graphs[i], adjList[i], graph_test_dict[i+1][y])\r\n" }, { "alpha_fraction": 0.5314685106277466, "alphanum_fraction": 0.6258741021156311, "avg_line_length": 20.153846740722656, "blob_id": "5446f8e1cbd67519918598c4fef87df0df4bf6cc", "content_id": "03759a4bf9567d9b8c09a1f4b5bdccc52ad09c62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 286, "license_type": "no_license", "max_line_length": 35, "num_lines": 13, "path": "/PPP12.py", "repo_name": "mloumeau/Discrete-Mathematics", "src_encoding": "UTF-8", "text": "from math import ceil as c\r\n\r\ndef calcInitials(n):\r\n # YOUR CODE GOES HERE\r\n return n**2 * (n+1) * 2**6\r\n\r\ndef hole_pigeon(p):\r\n # YOUR CODE GOES HERE\r\n return c(p/calcInitials(26))\r\n\r\npopulation = 1826160\r\nprint(hole_pigeon(population))\r\n# print(c(1826160/(26**2*27*2**6)))" }, { "alpha_fraction": 0.5587307214736938, "alphanum_fraction": 0.6072931289672852, "avg_line_length": 23.81900405883789, "blob_id": "a36b6ea38b28ebcbf46e0399533ad01ef068cbfd", "content_id": "9188ba14badc24d1034f2b532918b2f722acfa3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5704, "license_type": "no_license", "max_line_length": 108, "num_lines": 221, "path": "/PPP04.py", "repo_name": "mloumeau/Discrete-Mathematics", "src_encoding": "UTF-8", "text": "import sympy\r\nfrom math import *\r\n\r\n\r\n# Extra Helper functions:\r\n\r\n# Computes least common multiple.\r\n# Taken from https://stackoverflow.com/questions/51716916/built-in-module-to-calculate-least-common-multiple\r\n# Also, aparently there is a lcm() in Python 3.9, but I'm not using Python 3.9.\r\ndef lcm(a, b):\r\n return abs(a*b) // gcd(a, b)\r\n\r\n\r\n\r\n\r\n# Hash generaton:\r\n\r\n# Maps a single element to a prime\r\ndef mapElmToPrime(elm):\r\n return sympy.prime(elm)\r\n\r\n# Maps set elements to prime numbers.\r\ndef encodeSet (nSet):\r\n newset = list()\r\n for x in nSet:\r\n newset.append(sympy.prime(x))\r\n return newset\r\n\r\n# Computes hash based on prime number mapping.\r\ndef hashEncodingSet (nSet):\r\n hash = 1\r\n for x in nSet:\r\n hash *= x\r\n return hash\r\n\r\n# Gets Godel Hash for a set.\r\ndef hashSet(nSet):\r\n return hashEncodingSet(encodeSet(nSet))\r\n\r\n\r\n\r\n\r\n# Hash set functions:\r\n\r\n# Tests set membership. If element is a member or subset of the.\r\n# hash, it returns True. 
Else, it returns False.\r\ndef hashIsMember(hash, member):\r\n if ((hash % member) == 0):\r\n return True\r\n else:\r\n return False\r\n\r\n# Returns the union of two hashed sets as a hash.\r\ndef hashUnion(hash1, hash2):\r\n return lcm(hash1, hash2)\r\n\r\n# Returns intersection of two hashed sets as a hash.\r\ndef hashIntersect(hash1, hash2):\r\n return gcd(hash1, hash2)\r\n\r\n# Adds an element to the set if not already included.\r\ndef hashAddElm (hash, addElm):\r\n # Ensuring the element is prime guards against problematic subsets.\r\n # For exaple, if hash represents {2 4 5} and addElm represents {2 3}\r\n # this code could block it rather than multiplying {2}'s prime\r\n # representation to hash a second time.\r\n if ((not hashIsMember(hash, addElm)) and sympy.isprime(addElm)):\r\n return hash * addElm\r\n else:\r\n return hash\r\n\r\n# Removes an element from the set if included.\r\n# Unlike hashAddElm, rmvElm can be a subset.\r\ndef hashRmvElm(hash, rmvElm):\r\n if (hashIsMember(hash, rmvElm)):\r\n return hash / rmvElm\r\n else:\r\n return hash\r\n\r\n# Gets the compliment of hash1 by hash2.\r\ndef hashCompliment(hash1, hash2):\r\n return hash1 / gcd(hash1, hash2)\r\n\r\n\r\n\r\n\r\n# Godel hashes in poset\r\n# According to a post on the DM2 discussion board by Jacob Barnes,\r\n# we needed to code some way to show that relations can be encoded as\r\n# sets of pairs. This is my attempt to fulfill that requirement.\r\ndef encodeHashRelation():\r\n relation = list()\r\n baseSet = [[], [1], [2], [3], [1, 2], [1, 3], [2, 3], [1, 2, 3]]\r\n for x in baseSet:\r\n for y in baseSet:\r\n if (hashIsMember(hashSet(y), hashSet(x))):\r\n relation.append((hashSet(x), hashSet(y)))\r\n return relation\r\n\r\n\r\n\r\n\r\n# Tests:\r\nprint(\"Test 0, relations as sets of pairs:\")\r\nprint(encodeHashRelation())\r\n\r\n\r\nif (hashSet((1, 3, 4)) == hashSet((1, 3, 4))):\r\n print(\"Test 1 Passed\")\r\nelse:\r\n print(\"Test 1 Failed\")\r\n\r\n\r\nif (hashIsMember(hashSet((1, 3, 4)), mapElmToPrime(3)) == True):\r\n print(\"Test 2 Passed\")\r\nelse:\r\n print(\"Test 2 Failed\")\r\n\r\n\r\nif (hashIsMember(hashSet((14, 3, 4, 6, 7)), mapElmToPrime(9)) == False):\r\n print(\"Test 3 Passed\")\r\nelse:\r\n print(\"Test 3 Failed\")\r\n\r\n\r\nif (hashIsMember(hashSet((1, 3, 4)), hashSet((1, 3))) == True):\r\n print(\"Test 4 Passed\")\r\nelse:\r\n print(\"Test 4 Failed\")\r\n\r\n\r\nif (hashUnion(hashSet((19, 45, 6)), hashSet((5, 2, 3))) == hashSet((2, 3, 5, 6, 19, 45))):\r\n print(\"Test 5 Passed\")\r\nelse:\r\n print(\"Test 5 Failed\")\r\n\r\n\r\nif (hashUnion(hashSet((19, 45, 6)), hashSet((6, 19))) == hashSet((6, 19, 45))):\r\n print(\"Test 6 Passed\")\r\nelse:\r\n print(\"Test 6 Failed\")\r\n\r\n\r\nif (hashUnion(hashSet((19, 45, 6)), mapElmToPrime(7)) == hashSet((6, 19, 7, 45))):\r\n print(\"Test 7 Passed\")\r\nelse:\r\n print(\"Test 7 Failed\")\r\n\r\n\r\nif (hashIntersect(hashSet((3, 4, 5, 2, 7, 1, 8)), hashSet((4, 5, 10, 7))) == hashSet((4, 5, 7))):\r\n print(\"Test 8 Passed\")\r\nelse:\r\n print(\"Test 8 Failed\")\r\n\r\n\r\nif (hashIntersect(hashSet((4, 8, 15, 21)), hashSet((4, 8, 21, 15))) == hashSet((4, 15, 8, 21))):\r\n print(\"Test 9 Passed\")\r\nelse:\r\n print(\"Test 9 Failed\")\r\n\r\n\r\nif (hashIntersect(hashSet((4, 8, 15)), mapElmToPrime(6)) == 1):\r\n print(\"Test 10 Passed\")\r\nelse:\r\n print(\"Test 10 Failed\")\r\n\r\n\r\nif (hashAddElm(hashSet((1, 3, 5)), mapElmToPrime(3)) == hashSet((1, 3, 5))):\r\n print(\"Test 11 Passed\")\r\nelse:\r\n print(\"Test 11 Failed\")\r\n\r\n\r\nif 
(hashAddElm(hashSet((1, 3, 5)), mapElmToPrime(4)) == hashSet((1, 3, 4, 5))):\r\n print(\"Test 12 Passed\")\r\nelse:\r\n print(\"Test 12 Failed\")\r\n\r\n\r\n# hashAddElm cannot add two sets together. Use union.\r\nif (hashAddElm(hashSet((1, 3, 5)), hashSet((3, 4))) == hashSet((1, 3, 5))):\r\n print(\"Test 13 Passed\")\r\nelse:\r\n print(\"Test 13 Failed\")\r\n\r\n\r\nif (hashRmvElm(hashSet((4, 6, 2)), mapElmToPrime(5)) == hashSet((4, 2, 6))):\r\n print(\"Test 14 Passed\")\r\nelse:\r\n print(\"Test 14 Failed\")\r\n\r\n\r\nif (hashRmvElm(hashSet((4, 6, 2)), mapElmToPrime(2)) == hashSet((4, 6))):\r\n print(\"Test 15 Passed\")\r\nelse:\r\n print(\"Test 15 Failed\")\r\n\r\n\r\n# hashRmvElm can remove a subset of the set without issue.\r\nif (hashRmvElm(hashSet((4, 6, 2)), hashSet((6, 2))) == mapElmToPrime(4)):\r\n print(\"Test 16 Passed\")\r\nelse:\r\n print(\"Test 16 Failed\")\r\n\r\n\r\nif (hashCompliment(hashSet((6, 7, 3)), hashSet((5, 4, 19))) == hashSet((7, 6, 3))):\r\n print(\"Test 17 Passed\")\r\nelse:\r\n print(\"Test 17 Failed\")\r\n\r\n\r\nif (hashCompliment(hashSet((6, 7, 3)), hashSet((5, 3, 19))) == hashSet((7, 6))):\r\n print(\"Test 18 Passed\")\r\nelse:\r\n print(\"Test 18 Failed\")\r\n\r\n\r\nif (hashCompliment(hashSet((6, 7, 3)), hashSet((6, 7, 3))) == 1):\r\n print(\"Test 19 Passed\")\r\nelse:\r\n print(\"Test 19 Failed\")" }, { "alpha_fraction": 0.5468208193778992, "alphanum_fraction": 0.586705207824707, "avg_line_length": 27.288135528564453, "blob_id": "01f235eee174cb4c0d408ffc312679979a16f2ad", "content_id": "ac0d67c2843852a7402b97e913522cf7323fb278", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1730, "license_type": "no_license", "max_line_length": 120, "num_lines": 59, "path": "/PPP07.py", "repo_name": "mloumeau/Discrete-Mathematics", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Oct 27 19:20:43 2020\r\n\r\n@author: matth\r\n\"\"\"\r\nimport sympy\r\nfrom math import gcd\r\n\r\n# for x in range(1,151):\r\n# print((pow(x, 341) - x) % 341) \r\n \r\n# def findCoprimes(n):\r\n# coprimeList = []\r\n# for x in range(1,101):\r\n# if gcd(x,n) == 1:\r\n# coprimeList.append(x)\r\n# return coprimeList\r\n\r\n\r\n# for n in range (1000, 10000):\r\n# for x in findCoprimes(n):\r\n# if ((x**n) - x) % n == 0:\r\n# print(n)\r\n# break\r\n \r\ndef factorization(num):\r\n all_factors = []\r\n for i in range(1, num + 1):\r\n if num % i == 0:\r\n all_factors.append(i)\r\n return all_factors\r\n\r\ndef check_prime_largest(num):\r\n largest = num[0]\r\n for x in num:\r\n if sympy.isprime(x):\r\n if largest < x:\r\n largest = x\r\n \r\n return largest\r\n\r\nCEP = (sympy.prime(1224))\r\nNYEP = (sympy.prime(1231))\r\n\r\nlargestPrimeC = check_prime_largest(factorization(CEP - 1))\r\nlargestPrimeN = check_prime_largest(factorization(NYEP - 1))\r\n\r\nprint(\"Test 1 (Christmas Eve Prime)\")\r\nprint(\"Largest Prime Factor of p - 1:\", largestPrimeC)\r\nprint(\"Largest Prime Factor of Largest Prime Factor of (p - 1):\", check_prime_largest(factorization(largestPrimeC - 1)))\r\nprint(\"Largest Prime Factor of p + 1:\", check_prime_largest(factorization(CEP + 1)))\r\n\r\nprint(\"\\nTest 2 (New Year's Eve Prime)\")\r\nprint(\"Largest Prime Factor of q - 1:\",largestPrimeN)\r\nprint(\"Largest Prime Factor of Largest Prime Factor of (q - 1):\",check_prime_largest(factorization(largestPrimeN - 1)))\r\nprint(\"Largest Prime Factor of q + 1:\",check_prime_largest(factorization(NYEP + 
1)))\r\n\r\n#print(factorization(9929 - 1))\r\n\r\n" }, { "alpha_fraction": 0.564257025718689, "alphanum_fraction": 0.5957161784172058, "avg_line_length": 31.977272033691406, "blob_id": "ffc066eeeba24fd2582a5f944908a62ac536471b", "content_id": "0259a2053d02a6f9f8bb7e694bd3d1c33454dc32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2988, "license_type": "no_license", "max_line_length": 87, "num_lines": 88, "path": "/PPP09.py", "repo_name": "mloumeau/Discrete-Mathematics", "src_encoding": "UTF-8", "text": "from math import ceil, log\r\nfrom collections import Counter\r\nimport sympy\r\n\r\ndef buildTree(counterList):\r\n if len(counterList) < 2:\r\n return counterList[0][0]\r\n tempList = counterList\r\n temp1 = tempList.pop()\r\n temp2 = tempList.pop()\r\n temp3 = ((temp1[0], temp2[0]), temp1[1] + temp2[1])\r\n tempList.append(temp3)\r\n newCounter = Counter(dict(tempList))\r\n return(buildTree(newCounter.most_common()))\r\n\r\n\r\ndef readTree(treeList, sizeStr):\r\n if type(treeList) != tuple:\r\n return [(treeList, sizeStr)]\r\n newList = []\r\n if type(treeList[0]) == tuple:\r\n newList += readTree(treeList[0], sizeStr + 1)\r\n else:\r\n newList.append((treeList[0], sizeStr))\r\n if type(treeList[1]) == tuple:\r\n newList += readTree(treeList[1], sizeStr + 1)\r\n else:\r\n newList.append((treeList[1], sizeStr))\r\n return newList\r\n\r\n\r\ndef findCompressionRatio(list_of_gaps, asciiSize):\r\n list_of_frequencies = []\r\n list_of_unique_gaps = []\r\n numBitsDict = {}\r\n total_bits = 0\r\n \r\n counterForList = Counter(list_of_gaps)\r\n tempCounterList = counterForList.most_common()\r\n \r\n # Get list of frequencies\r\n for x in tempCounterList:\r\n list_of_frequencies.append(x[1])\r\n list_of_unique_gaps.append(x[0])\r\n numBitsDict[x[0]] = 0\r\n \r\n for x in readTree(buildTree(tempCounterList), 1):\r\n numBitsDict[x[0]] = x[1]\r\n\r\n for x in range(0, len(list_of_unique_gaps)):\r\n total_bits += list_of_frequencies[x] * numBitsDict.get(list_of_unique_gaps[x])\r\n total_characters = len(list_of_gaps)\r\n total_unique_characters = len(list_of_unique_gaps)\r\n fixed_bits_per_character = ceil(log(total_unique_characters, 2))\r\n total_fixed_bits = total_characters * fixed_bits_per_character\r\n if total_fixed_bits == 0:\r\n return 0\r\n compression_ratio = (total_fixed_bits - total_bits) / total_fixed_bits\r\n print(\"From Fixed: \", compression_ratio * 100, \"%\")\r\n print(\"From Binary: \", (((9999998 * 32) - total_bits) / (9999998 * 32)) * 100, \"%\")\r\n print(\"From ASCII: \", (asciiSize - total_bits) / asciiSize *100, \"%\")\r\n return compression_ratio\r\n\r\ndef asciiCompression():\r\n list_of_gaps = []\r\n prev = 3\r\n gap = 0\r\n sizeASCII = 0\r\n \r\n for primeVal in list(sympy.primerange(sympy.prime(3), sympy.prime(10000000))):\r\n gap = primeVal - prev\r\n prev = primeVal\r\n list_of_gaps.append(gap)\r\n # Each char is one byte (8 bits).\r\n # Then we add one extra char indicating a new line (\\n or other new line char)\r\n sizeASCII += (len(str(primeVal)) + 1) * 8\r\n\r\n findCompressionRatio(list_of_gaps, sizeASCII)\r\n \r\nasciiCompression()\r\n\r\ndef primes1(n):\r\n \"\"\" Returns a list of primes < n \"\"\"\r\n sieve = [True] * (n//2)\r\n for i in range(3,int(n**0.5)+1,2):\r\n if sieve[i//2]:\r\n sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)\r\n return [2] + [2*i+1 for i in range(1,n//2) if sieve[i]]" }, { "alpha_fraction": 0.3982558250427246, "alphanum_fraction": 0.4505814015865326, 
"avg_line_length": 14.380952835083008, "blob_id": "aa98d2cce76326e5ff29841e2d1302f25f71c012", "content_id": "ea9bd2206bbf96ba358abaab1e1522b0d19a613f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 344, "license_type": "no_license", "max_line_length": 35, "num_lines": 21, "path": "/PPP03.py", "repo_name": "mloumeau/Discrete-Mathematics", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Sep 30 10:43:50 2020\r\n\r\n@author: matth\r\n\"\"\"\r\n\r\n\r\nfrom math import floor, sqrt\r\n\r\nif __name__ == \"__main__\":\r\n x = sqrt(2)\r\n y = 2 + x\r\n a = list()\r\n b = list()\r\n for i in range(1, 30):\r\n a.append(floor(x*i))\r\n b.append(floor(y * i))\r\n\r\n print(a, \"\\n\")\r\n print(b)\r\n" } ]
7
nithin9296/awitha
https://github.com/nithin9296/awitha
3f764c4df97301fee76c7d513d1ef12e6efed18f
dea5003297c03154d9a7a93f3f9352ab0d59bc4e
69d661f33481d0b1f2544da847bb2cb71c942130
refs/heads/master
2020-03-10T22:55:53.135309
2018-04-15T16:52:56
2018-04-15T16:52:56
124,306,317
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6635242700576782, "alphanum_fraction": 0.6642494797706604, "avg_line_length": 27.957447052001953, "blob_id": "4e808949ae0939184bf4bc9a49c5aa6206b43b51", "content_id": "6f5c0cd561b8c292808d7021a50c4659bd0dc72b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1379, "license_type": "no_license", "max_line_length": 95, "num_lines": 47, "path": "/notepad/utils.py", "repo_name": "nithin9296/awitha", "src_encoding": "UTF-8", "text": "# import pandas as pd\n\n\n# def get_model_field_names(model, ignore_fields=['content_object']):\n# \tmodel_fields = model._meta_get_fields()\n# \tmodel_field_names = list(set([f.name for f in model_fields if f.name not in ignore_fields]))\n# \treturn model_field_names\n\n\n# def get_lookup_fields(model, fields=None):\n# \tmodel_field_names = get_model_field_names(model)\n# \tif fields is not None:\n# \t\tlookup_fields = []\n# \t\tfor x in fields:\n# \t\t\tif \"__\" in x:\n# \t\t\t\tlookup_fields.append(x)\n# \t\t\telif x in model_field_names:\n# \t\t\t\tlookup_fields.append(x)\n# \telse:\n# \t\tlookup_fields = model_field_names\n\n# \treturn lookup_fields\n\n# def qs_to_dataset(qs, fields=None):\n# \tlookup_fields = get_lookup_fields(qs.model, fields=fields)\n# \treturn list(qs.values(*lookup_fields))\n\n\n# def convert_to_dataframe(qs, fields=None, index=None):\n# \tlookup_fields = get_lookup_fields(qs.model, fields=fields)\n# \tindex_col = None\n# \tif index in lookup_fields:\n# \t\tindex_col = index\n# \telif \"id\" in lookup_fields:\n# \t\tindex_col = 'id'\n\n# \tvalues = qs_to_dataset(qs, fields=fields)\n# \tdf = pd.DataFrame.from_records(values, colums=lookup_fields, index=index_col)\n# \treturn df\n\n# def get_client_ip(request):\n# \tx_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n# \tif x_forwarded_for:\n# \t\tip = x_forwarded_for.split(',')[0]\n# \telse:\n# \t\tip = request.META.get('REMOTE_ADDR')\n# \treturn ip\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.7453625798225403, "alphanum_fraction": 0.7622259855270386, "avg_line_length": 24.69565200805664, "blob_id": "593f5f7692b117bcd82e2a8d75f1248d574c5bda", "content_id": "c83b072f6321589a12fd40814b4c46404c278b70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 593, "license_type": "no_license", "max_line_length": 49, "num_lines": 23, "path": "/cycles/models.py", "repo_name": "nithin9296/awitha", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\n\n\nclass profitandloss(models.Model):\n\tdocumentid = models.IntegerField(default=0)\n\tglcode = models.IntegerField(default=0)\n\tgldescription = models.CharField(max_length=30)\n\tamount = models.IntegerField(default=0)\n\n\tdef __str__(self):\n\t\treturn str(self.gldescription)\n\n\nclass balancesheet(models.Model):\n\tdocumentid = models.IntegerField(default=0)\n\tglcode = models.IntegerField(default=0)\n\tgldescriptions = models.CharField(max_length=30)\n\tamount = models.IntegerField(default=0)\n\n\tdef __str__(self):\n\t\treturn str(self.gldescription)\n\n\n" }, { "alpha_fraction": 0.6901408433914185, "alphanum_fraction": 0.6901408433914185, "avg_line_length": 20.399999618530273, "blob_id": "e7aa9685e159daea93963c3015fca8fe9a131cf8", "content_id": "64e1a864fdb80c5c24b27ed8e9c8824f93735819", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 213, "license_type": "no_license", "max_line_length": 
65, "num_lines": 10, "path": "/cycles/urls.py", "repo_name": "nithin9296/awitha", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\n\n\nfrom .views import importdata_view, my_custom_sql\n\nurlpatterns = [\n\turl(r'^importdata/$', importdata_view),\n\turl(r'^my_custom_sql/$', my_custom_sql, name = 'my_custom_sql'),\n\n\t]" }, { "alpha_fraction": 0.6528539061546326, "alphanum_fraction": 0.6921671032905579, "avg_line_length": 33.194766998291016, "blob_id": "4bffc296a275349c9265883749751634cca19edb", "content_id": "9409eceb0de651ddf072643ba7f7315d44381a5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23529, "license_type": "no_license", "max_line_length": 190, "num_lines": 688, "path": "/operating/views.py", "repo_name": "nithin9296/awitha", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, render_to_response, redirect\nfrom django.views.generic import ListView, DetailView, CreateView, UpdateView, View\nfrom django.urls import reverse_lazy\nfrom .forms import CompanyNetPercentageForm, LoginForm, SignUpForm, trialbalanceForm, feedbackform\nfrom django.contrib.auth import authenticate, login\nfrom .models import npmargin, CompanyNetPercentage, trialbalance, ObjectViewed\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom django.db.models import Sum, F\nfrom django.contrib.auth import login, authenticate\nfrom django.contrib.auth.forms import UserCreationForm\nimport django_excel as excel\nimport pyexcel\nfrom pyexcel import get_sheet\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import logout\nimport os\nimport csv\nfrom django.conf import settings\nfrom django.utils.text import slugify\nfrom operating.utils import get_lookup_fields, qs_to_dataset\nfrom io import StringIO\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.files import File\nfrom django.http import HttpResponse, StreamingHttpResponse\nfrom django.utils.text import slugify\nfrom django.views.generic import View\n# from operating.mixins import ObjectViewMixin\nfrom operating.signals import object_viewed_signal\nfrom .utils import convert_to_dataframe\ntry:\n\tfrom io import BytesIO as IO\nexcept ImportError:\n\tfrom StringIO import StringIO as IO\nimport pandas as pd\nimport xlsxwriter\n\n\nBASE_DIR = settings.BASE_DIR\n\n\n\nclass HomeView(View):\n\tdef get(self, request, *args, **kwargs):\n\t\treturn render(request, \"operating/home.html\", {})\n\n\ndef options(request):\n\treturn render(request, \"operating/options_page.html\", {})\n\n\ndef about(request):\n\treturn render(request, \"operating/about.html\", {})\n\n\n#@csrf_exempt\n\n# Create your views here.\n\n# class npmarginListView(ListView):\n# \tmodel \t\t\t\t= npmargin\n# \tcontext_object_name = 'netprofitmargin'\n\n\n# class npmarginCreateView(CreateView):\n# \tmodel \t\t\t\t= npmargin\n# \tfields\t\t\t\t= ('industry', 'npy2015', 'npy2016', 'npy2017')\n# \tsucess_url\t\t\t= reverse_lazy('npmargin_changelist')\n\n# class npmarginUpdateView(UpdateView):\n# \tmodel \t\t\t\t= npmargin\n# \tfields\t\t\t\t= ('industry', 'npy2015', 'npy2016', 'npy2017')\n# \tsucess_url\t\t\t= reverse_lazy('npmargin_changelist')\n\n\n# def npmargin_view(request):\n# \tif request.method == \"POST\":\n# \t\tform = CompanyNetPercentageForm(request.POST)\n\n# \t\tif 
form.is_valid():\n# \t\t\tform.save()\n# \t\t\tselected_industry = form.cleaned_data.get(\"industry\")\n# \t\t\tselected_user\t= form.cleaned_data.get(\"user\")\n# \t\t\tquery_results = npmargin.objects.filter(industry__industry_name=selected_industry)\n# \t\t\tcompany_query = CompanyNetPercentage.objects.filter(user=selected_user)\n# \t\t\tcontext = {\n# \t\t\t\t\t'query_results': query_results,\n# \t\t\t\t\t'company_query' : company_query\n# \t\t\t}\n# \t\t\treturn render(request, \"operating/detail.html\", context)\n# \telse:\n# \t\tform = CompanyNetPercentageForm()\n# \treturn render(request, \"operating/form.html\",{'form': form})\n\n\n# class npmarginListView(LoginRequiredMixin, ListView):\n# \tdef get_queryset(self):\n# \t\treturn CompanyNetPercentage.objects.filter(user=self.request.user)\n@login_required\ndef npmargin_view(request):\n\tif request.method == \"POST\":\n\t\tform = CompanyNetPercentageForm(request.POST)\n\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\tselected_industry = form.cleaned_data.get(\"industry\")\n\t\t\tselected_user = form.cleaned_data.get(\"user\")\n\n\t\t\trequest.session['selected_industry'] = selected_industry\n\t\t\trequest.session['selected_user'] = selected_user\n\n\n\n\t\t\tcontext = {\n\t\t\t\t\t'selected_industry': selected_industry,\n\t\t\t\t\t'selected_user': selected_user\n\t\t\t}\n \n\t\t\treturn render(request, \"charts2.html\", {})\n\telse:\n\t\tform = CompanyNetPercentageForm()\n\t\t\n\treturn render(request, \"operating/form.html\",{'form': form})\n\n\n\nclass ChartData(APIView):\n\n\tauthentication_classes = []\n\tpermission_classes = []\n\n\tdef get(self, request, format=None):\n\n\t\tselected_industry = request.session.get('selected_industry')\n\t\tselected_user = request.session.get('selected_user')\n\t\t#selected_industry: request.GET.get('selected_industry')\n\t\t#selected_user: request.GET.get('selected_user')\n\n\t\tindustry_results = npmargin.objects.filter(industry__industry_name=selected_industry)\n\t\tnp2015 = (CompanyNetPercentage.objects.filter(user=selected_user)\n\t\t\t\t\t\t\t\t\t .aggregate(np2015=Sum('netmargin_percentage_2015'))['np2015'])\n\t\tnp2016 = (CompanyNetPercentage.objects.filter(user=selected_user)\n\t\t\t\t\t\t\t\t\t .aggregate(np2016=Sum('netmargin_percentage_2016'))['np2016'])\n\t\tnp2017 = (CompanyNetPercentage.objects.filter(user=selected_user)\n\t\t\t\t\t\t\t\t\t .aggregate(np2017=Sum('netmargin_percentage_2017'))['np2017'])\n\t\tgp2017 = (CompanyNetPercentage.objects.filter(user=selected_user)\n\t\t\t\t\t\t\t\t\t .aggregate(gp2017=Sum('gross_profit_margin_2017'))['gp2017'])\n\t\tgp2016 = (CompanyNetPercentage.objects.filter(user=selected_user)\n\t\t\t\t\t\t\t\t\t .aggregate(gp2016=Sum('gross_profit_margin_2016'))['gp2016'])\n\t\tgp2015 = (CompanyNetPercentage.objects.filter(user=selected_user)\n\t\t\t\t\t\t\t\t\t .aggregate(gp2015=Sum('gross_profit_margin_2015'))['gp2015'])\n\t\tde2015 = (CompanyNetPercentage.objects.filter(user=selected_user)\n\t\t\t\t\t\t\t\t\t .aggregate(de2015=Sum('debtequity_ratio_2015'))['de2015'])\n\t\tde2016 = (CompanyNetPercentage.objects.filter(user=selected_user)\n\t\t\t\t\t\t\t\t\t .aggregate(de2016=Sum('debtequity_ratio_2016'))['de2016'])\n\t\tde2017 = (CompanyNetPercentage.objects.filter(user=selected_user)\n\t\t\t\t\t\t\t\t\t .aggregate(de2017=Sum('debtequity_ratio_2017'))['de2017'])\n\t\tnp2015i = (npmargin.objects.filter(industry__industry_name=selected_industry)\n\t\t\t\t\t\t\t\t\t.aggregate(np2015i=Sum('netmargin_percentage_2015'))['np2015i'])\n\n\t\tnp2016i = 
(npmargin.objects.filter(industry__industry_name=selected_industry)\n\t\t\t\t\t\t\t\t\t.aggregate(np2016i=Sum('netmargin_percentage_2016'))['np2016i'])\n\n\t\tnp2017i = (npmargin.objects.filter(industry__industry_name=selected_industry)\n\t\t\t\t\t\t\t\t\t.aggregate(np2017i=Sum('netmargin_percentage_2017'))['np2017i'])\n\n\t\tsales_2017 = (trialbalance.objects.filter(classification__in=['sales']).aggregate(sales_2017=Sum('credit_2017'))['sales_2017'])\n\t\tcgs_2017 = (trialbalance.objects.filter(classification__in=['costofgoodsold']).aggregate(cgs_2017=Sum('debit_2017'))['cgs_2017'])\n\t\tgross_profit_2017 = sales_2017 - cgs_2017\n\t\tgross_profit_margin_2017 = gross_profit_2017 / sales_2017\n\n\t\tsales_2016 = (trialbalance.objects.filter(classification__in=['sales']).aggregate(sales_2016=Sum('credit_2016'))['sales_2016'])\n\t\tcgs_2016 = (trialbalance.objects.filter(classification__in=['costofgoodsold']).aggregate(cgs_2016=Sum('debit_2016'))['cgs_2016'])\n\t\tgross_profit_2016 = sales_2016 - cgs_2016\n\t\tgross_profit_margin_2016 = gross_profit_2016 / sales_2016\n\n\t\tsales_2015 = (trialbalance.objects.filter(classification__in=['sales']).aggregate(sales_2015=Sum('credit_2015'))['sales_2015'])\n\t\tcgs_2015 = (trialbalance.objects.filter(classification__in=['costofgoodsold']).aggregate(cgs_2015=Sum('debit_2015'))['cgs_2015'])\n\t\tgross_profit_2015 = sales_2015 - cgs_2015\n\t\tgross_profit_margin_2015 = gross_profit_2015 / sales_2015\n\n\t\tdebt_cy = (trialbalance.objects.filter(classification__in=['debt']).aggregate(debt_cy=Sum('credit_cy'))['debt_cy'])\n\t\tequity_cy = (trialbalance.objects.filter(classification__in=['equity']).aggregate(equity_cy=Sum('credit_cy'))['equity_cy'])\n\t\tdebtequity_ratio_cy = debt_cy / equity_cy\n\n\t\tdebt_py = (trialbalance.objects.filter(classification__in=['debt']).aggregate(debt_py=Sum('credit_py'))['debt_py'])\n\t\tequity_py = (trialbalance.objects.filter(classification__in=['equity']).aggregate(equity_py=Sum('credit_py'))['equity_py'])\n\t\tdebtequity_ratio_py = debt_py / equity_py\n\n\t\t\t\t\n\n\t\tlabels = [\"2015\", \"2016\", \"2017\", ]\n\t\tdefault_items_c = [np2015, np2016, np2017]\n\t\tdefault_items_i = [np2015i, np2016i, np2017i]\n\t\tdefault_items_gp = [gross_profit_margin_cy, gross_profit_margin_py]\n\t\tdefault_items_de = [debtequity_ratio_cy, debtequity_ratio_py]\n\n\t\tdata = {\n\t\t\t\t\"labels\" : labels,\n\t\t\t\t\"default_c\" : default_items_c,\n\t\t\t\t\"default_i\" : default_items_i,\n\t\t\t\t\"default_gp\" : default_items_gp,\n\t\t\t\t\"default_de\" : default_items_de,\n\t\t\t\t}\n\t\treturn Response(data)\n\n\nclass ChartData_op2(APIView):\n\n\tauthentication_classes = []\n\tpermission_classes = []\n\n\tdef get(self, request, format=None):\n\n\t\tselected_industry = request.session.get('selected_industry')\n\t\tselected_user = request.session.get('selected_user')\n\t\t#selected_industry: request.GET.get('selected_industry')\n\t\t#selected_user: request.GET.get('selected_user')\n\n\t\tindustry_results = npmargin.objects.filter(industry__industry_name=selected_industry)\n\t\tnp2015 = (CompanyNetPercentage.objects.filter(user=selected_user)\n\t\t\t\t\t\t\t\t\t .aggregate(np2015=Sum('netmargin_percentage_2015'))['np2015'])\n\t\tnp2016 = (CompanyNetPercentage.objects.filter(user=selected_user)\n\t\t\t\t\t\t\t\t\t .aggregate(np2016=Sum('netmargin_percentage_2016'))['np2016'])\n\t\tnp2017 = (CompanyNetPercentage.objects.filter(user=selected_user)\n\t\t\t\t\t\t\t\t\t 
.aggregate(np2017=Sum('netmargin_percentage_2017'))['np2017'])\n\t\tgp2017 = (CompanyNetPercentage.objects.filter(user=selected_user)\n\t\t\t\t\t\t\t\t\t .aggregate(gp2017=Sum('gross_profit_margin_2017'))['gp2017'])\n\t\tgp2016 = (CompanyNetPercentage.objects.filter(user=selected_user)\n\t\t\t\t\t\t\t\t\t .aggregate(gp2016=Sum('gross_profit_margin_2016'))['gp2016'])\n\t\tgp2015 = (CompanyNetPercentage.objects.filter(user=selected_user)\n\t\t\t\t\t\t\t\t\t .aggregate(gp2015=Sum('gross_profit_margin_2015'))['gp2015'])\n\t\tde2015 = (CompanyNetPercentage.objects.filter(user=selected_user)\n\t\t\t\t\t\t\t\t\t .aggregate(de2015=Sum('debtequity_ratio_2015'))['de2015'])\n\t\tde2016 = (CompanyNetPercentage.objects.filter(user=selected_user)\n\t\t\t\t\t\t\t\t\t .aggregate(de2016=Sum('debtequity_ratio_2016'))['de2016'])\n\t\tde2017 = (CompanyNetPercentage.objects.filter(user=selected_user)\n\t\t\t\t\t\t\t\t\t .aggregate(de2017=Sum('debtequity_ratio_2017'))['de2017'])\n\t\tnp2015i = (npmargin.objects.filter(industry__industry_name=selected_industry)\n\t\t\t\t\t\t\t\t\t.aggregate(np2015i=Sum('netmargin_percentage_2015'))['np2015i'])\n\n\t\tnp2016i = (npmargin.objects.filter(industry__industry_name=selected_industry)\n\t\t\t\t\t\t\t\t\t.aggregate(np2016i=Sum('netmargin_percentage_2016'))['np2016i'])\n\n\t\tnp2017i = (npmargin.objects.filter(industry__industry_name=selected_industry)\n\t\t\t\t\t\t\t\t\t.aggregate(np2017i=Sum('netmargin_percentage_2017'))['np2017i'])\n\n\t\tgp2017i = (npmargin.objects.filter(industry__industry_name=selected_industry)\n\t\t\t\t\t\t\t\t\t.aggregate(np2017i=Sum('netmargin_percentage_2017'))['np2017i'])\n\t\t\t\t\n\n\t\tlabels = [\"2015\", \"2016\", \"2017\", ]\n\t\tdefault_items_c = [np2015, np2016, np2017]\n\t\tdefault_items_i = [np2015i, np2016i, np2017i]\n\t\tdefault_items_gp = [gp2015, gp2016, gp2017]\n\t\tdefault_items_de = [de2015, de2016, de2017]\n\n\t\tdata = {\n\t\t\t\t\"labels\" : labels,\n\t\t\t\t\"default_c\" : default_items_c,\n\t\t\t\t\"default_i\" : default_items_i,\n\t\t\t\t\"default_gp\" : default_items_gp,\n\t\t\t\t\"default_de\" : default_items_de,\n\t\t\t\t}\n\t\treturn Response(data)\n\n\n# def productview(request):\n# \t\tselected_industry = request.session.get('selected_industry')\n# \t\tselected_user = request.session.get('selected_user')\n\n# \t\t# industry_results = npmargin.objects.filter(industry__industry_name=selected_industry)\n# \t\t# np2015 = (CompanyNetPercentage.objects.filter(user=selected_user)\n# \t\t# \t\t\t\t\t\t\t .aggregate(np2015=Sum('netmargin_percentage_2015'))['np2015'])\n\t\t\n# \t\tqs = CompanyNetPercentage.objects.filter(user=selected_user)\n# \t\tdf = convert_to_dataframe(qs, fields=['gross_profit_margin_2015', 'gross_profit_margin_2016', 'gross_profit_margin_2017',\n# \t\t\t\t\t\t\t\t\t\t\t'debtequity_ratio_2015', 'debtequity_ratio_2016', 'debtequity_ratio_2017'\n# \t\t\t\t\t\t\t\t\t\t\t'netmargin_percentage_2015', 'netmargin_percentage_2016', 'netmargin_percentage_2017'])\n\n# \t\tjson = df.to_json(orient='records')\n\n\n# \t\tcontext = {\n# \t\t\t\t\t\"data\": json\n# \t\t\t\t}\n\n# \t\treturn render (request, 'operating/product.html', context)\n\ndef productview(request):\n\t\tselected_industry = request.session.get('selected_industry')\n\t\tselected_user = request.session.get('selected_user')\n\n\t\t# industry_results = npmargin.objects.filter(industry__industry_name=selected_industry)\n\t\t# np2015 = (CompanyNetPercentage.objects.filter(user=selected_user)\n\t\t# \t\t\t\t\t\t\t 
.aggregate(np2015=Sum('netmargin_percentage_2015'))['np2015'])\n\t\t\n\t\tqs1 = CompanyNetPercentage.objects.filter(user=selected_user)\n\t\tqs2 = CompanyNetPercentage.objects.filter(industry=selected_industry)\n\n\t\tdf1 = convert_to_dataframe(qs1, fields=['user', 'gross_profit_margin_2015', 'gross_profit_margin_2016', 'gross_profit_margin_2017',\n\t\t\t\t\t\t\t\t\t\t\t'debtequity_ratio_2015', 'debtequity_ratio_2016', 'debtequity_ratio_2017',\n\t\t\t\t\t\t\t\t\t\t\t'netmargin_percentage_2015', 'netmargin_percentage_2016', 'netmargin_percentage_2017'])\n\t\tdf2 = convert_to_dataframe(qs2, fields=['user', 'gross_profit_margin_2015', 'gross_profit_margin_2016', 'gross_profit_margin_2017',\n\t\t\t\t\t\t\t\t\t\t\t'debtequity_ratio_2015', 'debtequity_ratio_2016', 'debtequity_ratio_2017', \n\t\t\t\t\t\t\t\t\t\t\t'netmargin_percentage_2015', 'netmargin_percentage_2016', 'netmargin_percentage_2017', \n\t\t\t\t\t\t\t\t\t\t\t])\n\t\tframes = [df1, df2]\n\t\tresult = pd.concat(frames)\n\t\texcel_file = IO()\n\t\txlwriter = pd.ExcelWriter(excel_file, engine='xlsxwriter')\n\n\t\tresult.to_excel(xlwriter, 'sheetname')\n\t\txlwriter.save()\n\t\txlwriter.close()\n\t\texcel_file.seek(0)\n\n\t\tresponse = HttpResponse(excel_file.read(), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n\n\t\tresponse['content-Disposition'] = 'attachment; filename=myfile.xlsx'\n\t\treturn response\n\nclass CSVResponseMixin(object):\n\tcsv_filename = 'csvfile.csv'\n\n\tdef get_csv_filename(self):\n\t\treturn self.csv_filename\n\n\tdef render_to_csv(self, context):\n\t\tresponse = HttpResponse(content_type='text/csv')\n\t\t\n\t\tcd = 'attachment; filename=\"{0}\"'.format(self.get_csv_filename())\n\t\tresponse['content-Disposition'] = cd\n\n\t\twriter = csv.DictWriter(response, fieldnames=fieldnames\t)\n\t\t# writer.writeheader()\n\t\tfor row in context:\n\t\t\twriter.writerow(row)\n\n\t\treturn response\n\nclass DataView(CSVResponseMixin, View):\n\n\tdef get(self, request, *args, **kwargs):\n\t\tselected_industry = request.session.get('selected_industry')\n\t\tselected_user = request.session.get('selected_user')\n\t\tqs = CompanyNetPercentage.objects.filter(user=selected_user)\n\t\tdf = convert_to_dataframe(qs, fields=['gross_profit_margin_2015', 'gross_profit_margin_2016', 'gross_profit_margin_2017',\n\t\t\t\t\t\t\t\t\t\t\t'debtequity_ratio_2015', 'debtequity_ratio_2016', 'debtequity_ratio_2017'\n\t\t\t\t\t\t\t\t\t\t\t'netmargin_percentage_2015', 'netmargin_percentage_2016', 'netmargin_percentage_2017'])\n\n\t\tjson = df.to_json(orient='records')\n\n\n\t\tcontext = {\n\t\t\t\t\t\"data\": json\n\t\t\t\t}\n\n\t\t\n\t\treturn self.render_to_csv(context)\n\n\n\n\n\n\n# class HomeView(View):\n# def get(self, request, *args, **kwargs):\n# return render(request, 'operating/charts.html', {})\n\t\n\n\n\n# class ChartData(APIView):\n \n# authentication_classes = []\n# permission_classes = []\n\n# def get(self, request, format=None):\n# qs_count = trialbalance2017.objects.all().count()\n \n# current_asset = (trialbalance2017.objects\n# .filter(description__in=['cash', 'debtors', 'inventory'])\n# .aggregate(\n# current_asset=Sum('debit')\n# )['current_asset']\n# )\n \n# current_liab = (trialbalance2017.objects\n# .filter(description__in=['creditors', 'loans'])\n# .aggregate(\n# current_liab=Sum('credit')\n# )['current_liab']\n# )\n# current_assets = current_asset - current_liab \n# labels = [\"Users\", \"current_assets\", \"current_asset\", \"current_liab\", ]\n# default_items = [qs_count, current_assets, 
current_asset, current_liab,]\n \n \n# data = {\n# \"labels\" : labels,\n# \"default\" : default_items,\n# } \n# return Response(data)\n\n\n\n\n\n\n\n\n\ndef npmargin_detail_view(request, pk=None, *args, **kwags):\n\tqs = npmargin.objects.filter(industry=pk)\n\tcontext = {\n\t\t'object' : qs\n\t}\n\treturn render(request, \"operating/detail.html\", context)\n\n\n\ndef signup(request):\n\tif request.method == 'POST':\n\t\tform = SignUpForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\tusername = form.cleaned_data.get('username')\n\t\t\traw_password = form.cleaned_data.get('password1')\n\t\t\tuser = authenticate(username=username, password=raw_password)\n\t\t\tlogin(request,user)\n\t\t\treturn redirect('home')\n\telse:\n\t\tform = SignUpForm()\n\treturn render(request, 'operating/signup.html', {'form': form})\n\ndef feedback(request):\n\tif request.method == 'POST':\n\t\tform = feedbackform(request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect('home')\n\n\telse:\n\t\tform = feedbackform(),\n\treturn render(request, 'operating/feedback.html', {'form': form})\n\n\ndef login_page(request):\n\tform = LoginForm(request.POST or None)\n\tcontext = {\n\t\t\"form\": form\n\t}\n\tprint(\"User logged in\")\n\t#print(request.user.is_authenticated())\n\tif form.is_valid():\n\t\tprint(form.cleaned_data)\n\t\tusername = form.cleaned_data.get(\"username\")\n\t\tpassword = form.cleaned_data.get(\"password\")\n\t\tuser = authenticate(request, username=username, password=password)\n\t\tprint (user)\n\t\t#print(request.user.is_authenticated())\n\t\tif user is not None:\n\t\t\t#print(request.user.is_authenticated())\n\t\t\tlogin(request, user)\n\t\t\tcontext['form'] = LoginForm()\n\t\t\treturn redirect(\"/operating/options\")\n\t\telse:\n\t\t\tprint(\"Error\")\n\n\treturn render(request, \"operating/login.html\", context)\n\n\n\ndef logout_view(request):\n logout(request)\n\n\n# def register_page(request):\n# \tform = LoginForm(request.POST or None)\n# \tif form.is_valid():\t\n# \t\tprint(form.cleaned_data)\n# \treturn render(request, \"auth/login.html\", {})\n\t\n\ndef trialbalanceanalysis(request):\n\tif request.method == \"POST\":\n\t\tform = trialbalanceForm(request.POST, request.FILES)\n\t\tif form.is_valid():\n\t\t\trequest.FILES['trialbalancefile'].save_book_to_database(\n\t\t\t\tmodels=[trialbalance],\n\t\t\t\tinitializer=[None],\n\t\t\t\tmapdicts=[\n\t\t\t\t\t['user', 'region', 'industry', 'glcode', 'gldescription', 'classification', 'subclassification', 'debit_2017', 'credit_2017', 'debit_2016', 'credit_2016', 'debit_2015', 'credit_2015' ]]\n\t\t\t)\n\t\t\tselected_industry = form.cleaned_data.get(\"industry\")\n\t\t\tselected_user = form.cleaned_data.get(\"user\")\n\n\t\t\trequest.session['selected_industry'] = selected_industry\n\t\t\trequest.session['selected_user'] = selected_user\n\n\n\n\t\t\tcontext = {\n\t\t\t\t\t'selected_industry': selected_industry,\n\t\t\t\t\t'selected_user': selected_user\n\t\t\t}\n \n\t\t\treturn render(request, \"charts.html\", {})\n\t\telse:\n\t\t\treturn HttpResponseBadRequest()\n\telse:\n\t\tform = trialbalanceForm()\n\t\treturn render( request,'operating/prelim_analysis.html', {'form': form})\n\n# def handson_table(request):\n# return excel.make_response_from_tables(\n# [trialbalance], 'handsontable.html')\n\n\n# def embed_handson_table(request):\n# \"\"\"\n# Renders two table in a handsontable\n# \"\"\"\n# content = excel.pe.save_book_as(\n# models=[trialbalance],\n# dest_file_type='handsontable.html',\n# 
dest_embed=True)\n# content.seek(0)\n# return render(\n# request,\n# 'custom-handson-table.html',\n# {\n# 'handsontable_content': content.read()\n# })\n\n\n# def embed_handson_table_from_a_single_table(request):\n# \"\"\"\n# Renders one table in a handsontable\n# \"\"\"\n# content = excel.pe.save_as(\n# model=trialbalance,\n# dest_file_type='handsontable.html',\n# dest_embed=True)\n# content.seek(0)\n# return render(\n# request,\n# 'custom-handson-table.html',\n# {\n# 'handsontable_content': content.read()\n# })\n\n\n\n\n\n@login_required\ndef rationop1(request):\n\tsales_cy = (trialbalance.objects.filter(classification__in=['sales']).aggregate(sales_cy=Sum('credit_cy'))['sales_cy'])\n\tcgs_cy = (trialbalance.objects.filter(classification__in=['costofgoodsold']).aggregate(cgs_cy=Sum('debit_cy'))['cgs_cy'])\n\tgross_profit_cy = sales_cy - cgs_cy\n\tgross_profit_margin_cy = gross_profit_cy / sales_cy\n\n\tsales_py = (trialbalance.objects.filter(classification__in=['sales']).aggregate(sales_py=Sum('credit_py'))['sales_py'])\n\tcgs_py = (trialbalance.objects.filter(classification__in=['costofgoodsold']).aggregate(cgs_py=Sum('debit_cy'))['cgs_py'])\n\tgross_profit_py = sales_py - cgs_py\n\tgross_profit_margin_py = gross_profit_py / sales_py\n\n\tdebt_cy = (trialbalance.objects.filter(classification__in=['debt']).aggregate(debt_cy=Sum('credit_cy'))['debt_cy'])\n\tequity_cy = (trialbalance.objects.filter(classification__in=['equity']).aggregate(cgs_cy=Sum('credit_cy'))['equity_cy'])\n\tdebtequity_ratio_cy = debt_cy / equity_cy\n\n\tdebt_py = (trialbalance.objects.filter(classification__in=['debt']).aggregate(sales_py=Sum('credit_py'))['debt_py'])\n\tequity_py = (trialbalance.objects.filter(classification__in=['equity']).aggregate(cgs_py=Sum('debit_cy'))['equity_py'])\n\tdebtequity_ratio_py = debt_py / equity_py\t\n\n\n\n\t\n\n\tcontext = {\n\t\t\"gross_profit_margin_cy\" : gross_profit_margin_cy,\n\t\t\"gross_profit_margin_py\" : gross_profit_margin_py,\n\t\t\"debtequity_ratio_cy\" : debtequity_ratio_cy,\n\t\t\"debtequity_ratio_py\" : debtequity_ratio_py\n\t}\n\n\tobject_viewed_signal.send(instance.__class__, instance=context, request=request)\n\treturn render(request, 'operating/rationop1.html', context)\n\n\n\n\n\ndef qs_to_local_csv(qs, fields=None, path=None, filename=None):\n\tif path is None:\n\t\tpath = os.path.join(os.path.dirname(BASE_DIR), 'csvstorage')\n\n\t\tif not os.path.exists(path):\n\t\t\tos.mkdir(path)\n\n\tif filename is None:\n\t\tmodel_name = slugify(qs.model.__name__)\n\t\tfile_name = \"{}.csv\".format(model_name)\n\tfilepath = os.path.join(path, filename)\n\tlookups = get_lookup_fields(qs.model, fields=fields)\n\tdataset = qs_to_dataset(qs, fields)\n\trow_done = 0\n\twith open(filepath, 'w') as my_file:\n\t\twriter = csv.DictWriter(my_file, filenames=lookups)\n\t\twriter.writeheader()\n\t\tfor data_item in dataset:\n\t\t\twriter.writerow(data_item)\n\t\t\trows_done += 1\n\tprint(\"{} rows completed\".format(rows_done))\n\n\nclass Echo:\n\tdef write (self, value):\n\t\treturn value\n\nclass CSVDownloadView(LoginRequiredMixin, View):\n\tdef get(self, request, *args, **kwargs):\n\t\tqs = ObjectViewed.objects.all()\n\t\tmodel_name = slugify(qs.model.__name__)\n\t\tfilename = \"{}.csv\".format(model_name)\n\t\tfp = StringIO()\n\t\tpseudo_buffer = Echo()\n\t\toutcsv = csv.writer(pseudo_buffer)\n\t\twriter = csv.DictWriter(my_file, fieldnames=lookups)\n\t\twriter.writeheader()\n\t\tfor data_item in dataset:\n\t\t\twriter.writerow(data_item)\n\t\tstream_file = 
file(fp)\n\t\tresponse = StreamingHttpResponse(stream_file,\n\t\t\t\t\t\t\t\t\t\tcontent_type =\"text/csv\")\n\t\tresponse['content-Disposition'] = 'attachment; filename=\"{}\"'.format(filename)\n\t\treturn response\n\n\n\n# def user(request, user_id):\n# \tuser = get_object_or_404(User, pk=user_id)\n# \treturn render_to_response('')\t\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# class npmarginListView(ListView):\n# \tqueryset = npmargin.objects.all()\n# \ttemplate_name = \"operating/list.html\"\n\n# \tdef get_queryset(self, *args, **kwargs):\n# \t\trequest = self.request\n# \t\treturn npmargin.objects.all()\n\n\n# class npmarginDetailView(DetailView):\n# \ttemplate_name = \"operating/detail.html\"\n\n# \tdef get_object(self, *args, **kwargs):\n# \t\trequest = self.request\n# \t\tpk = self.kwargs.get('pk')\n# \t\tinstance = npmargin.objects.get_by_id(pk)\n# \t\tif instance is None:\n# \t\t\traise Http404(\"Product doesn't exist\")\n# \t\treturn instance\n\n\n\n" }, { "alpha_fraction": 0.6766581535339355, "alphanum_fraction": 0.7008928656578064, "avg_line_length": 33.47252655029297, "blob_id": "352ffec8110c30ee994e6d51e8adf34099dcbeca", "content_id": "00123a5e902e9fbdfc216b9ef9480dd04e3481c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3136, "license_type": "no_license", "max_line_length": 287, "num_lines": 91, "path": "/operating/forms.py", "repo_name": "nithin9296/awitha", "src_encoding": "UTF-8", "text": "from django import forms\nfrom django.forms import ModelChoiceField, ModelForm\nfrom .models import Region, Industry, CompanyNetPercentage, trialbalance, feedback\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\n\n\n\n# REGION_CHOICES=[\n# \t('United States', 'United States'),\n# \t('Aus, NZ & Canada', 'Aus, NZ & Canada'),\n# \t('Europe', 'Europe'),\n# \t('Emerging Markets', 'Emerging Markets'),\n# \t('Japan','Japan'),\n# \t]\n\n# INDUSTRY_CHOICES=[\n# \t('Advertising', 'Advertising'),\n# \t('aerospace', 'aerospace'),\n# \t('air transport', 'air transport'),\n# \t('apparel', 'apparel'),\n# \t]\n\n\n\nclass CompanyNetPercentageForm(ModelForm):\n\tclass Meta:\n\t\tmodel = CompanyNetPercentage\n\t\tfields = ['user', 'region', 'industry', 'gross_profit_margin_2015', 'gross_profit_margin_2016', 'gross_profit_margin_2017', 'debtequity_ratio_2015','debtequity_ratio_2016', 'debtequity_ratio_2017', 'netmargin_percentage_2015', 'netmargin_percentage_2016', 'netmargin_percentage_2017']\n\t\t# def __int__(self, request, *args, **kwargs):\n\t\t# \tsuper(CompanyNetPercentageForm, self).__init__(*args, **kwargs)\n\t\t# \tself.fields[]\n\t\tdef clean_user(self):\n\t\t\tuser = self.cleaned_data.get('user')\n\t\t\tqs = CompanyNetPercentage.objects.filter(user=user)\n\t\t\tif qs.exists():\n\t\t\t\traise forms.ValidationError(\"User already exists\")\n\t\t\treturn user\n\n\n\t# region = forms.CharField(label='Please select the Region', widget=forms.Select(choices=REGION_CHOICES))\n\t# industry = forms.CharField(label='Please select the Industry', widget=forms.Select(choices=INDUSTRY_CHOICES))\n\t# #industry = forms.ModelChoiceField(queryset= Industry.objects.values_list('name'))\n\t# npy2015 = forms.DecimalField(decimal_places=2, max_digits=20)\n\t# npy2016 = forms.DecimalField(decimal_places=2, max_digits=20)\n\t# npy2017 = forms.DecimalField(decimal_places=2, max_digits=20)\n\n\nclass LoginForm(forms.Form):\n\tusername = forms.CharField()\n\tpassword = 
forms.CharField(widget=forms.PasswordInput)\n\n\n\n\nclass SignUpForm(UserCreationForm):\n first_name = forms.CharField(max_length=30, required=False, help_text='Optional.')\n last_name = forms.CharField(max_length=30, required=False, help_text='Optional.')\n email = forms.EmailField(max_length=254, help_text='Required. Inform a valid email address.')\n\n class Meta:\n model = User\n fields = ('username', 'first_name', 'last_name', 'email', 'password1', 'password2', )\n\n\n\nclass trialbalanceForm(forms.ModelForm):\n\ttrialbalancefile = forms.FileField(\n label = 'Upload trialblanace as per the format' \n )\n\t# user = forms.CharField(max_length=25)\n\t# region = forms.CharField(max_length=25, choices=REGION_CHOICES)\n\tclass Meta:\n\t\tmodel = trialbalance\n\t\tfields = ('user', 'region', 'industry',)\n\n# class trialbalanceForm(forms.Form):\n# \ttrialbalancefile = forms.FileField(\n# label = 'Upload trialblanace as per the format' \n# )\n\n# \n\nclass feedbackform(forms.ModelForm):\n\tfeedback = forms.CharField(max_length=10000,\n\t\t\twidget=forms.TextInput(attrs={'autofocus': 'autofocus',\n\t\t\t\t\t\t\t\t\t\t\t'size': '100'}))\n\n\tclass Meta:\n\t\tmodel = feedback\n\t\tfields = ('feedback',)" }, { "alpha_fraction": 0.8283582329750061, "alphanum_fraction": 0.8283582329750061, "avg_line_length": 21.35714340209961, "blob_id": "20f3ce99eaf895289ef4653f61a7ce540ba74685", "content_id": "8326a48065afc58c0432e9521343dced1c6bc0be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 938, "license_type": "no_license", "max_line_length": 140, "num_lines": 42, "path": "/operating/admin.py", "repo_name": "nithin9296/awitha", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom import_export import resources\nfrom import_export.admin import ImportExportModelAdmin, ImportExportMixin, ImportMixin, ExportActionModelAdmin, ImportExportActionModelAdmin\n\n\n\n# Register your models here.\nfrom .models import npmargin, Industry, Region, UserName, CompanyNetPercentage, ObjectViewed, feedback\n\n\nadmin.site.register(Region)\nadmin.site.register(UserName)\nadmin.site.register(CompanyNetPercentage)\nadmin.site.register(ObjectViewed)\nadmin.site.register(feedback)\n\n\nclass Industryresource(resources.ModelResource):\n\n\tclass Meta:\n\t\tmodel = Industry\n\n\n\nclass npmarginresource(resources.ModelResource):\n\n\tclass Meta:\n\t\tmodel = npmargin\n\n\n\nclass Industryadmin(ImportExportModelAdmin):\n\tresource_class = Industryresource\n\t\n\n\nclass npmarginadmin(ImportExportModelAdmin):\n\tresource_class = npmarginresource\n\n\nadmin.site.register(Industry, Industryadmin)\nadmin.site.register(npmargin, npmarginadmin)" }, { "alpha_fraction": 0.5446224212646484, "alphanum_fraction": 0.5926773548126221, "avg_line_length": 20.850000381469727, "blob_id": "7585a8554f6172eba83cdb811891070a591e2837", "content_id": "1bf0d695512391cb9b5ca230828823a471eedcc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 437, "license_type": "no_license", "max_line_length": 48, "num_lines": 20, "path": "/cycles/migrations/0002_auto_20180327_2212.py", "repo_name": "nithin9296/awitha", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.5 on 2018-03-27 18:12\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cycles', '0001_initial'),\n ]\n\n operations = [\n 
migrations.RenameField(\n model_name='balancesheet',\n old_name='gldescription',\n new_name='gldescriptions',\n ),\n ]\n" }, { "alpha_fraction": 0.7242026329040527, "alphanum_fraction": 0.7251406908035278, "avg_line_length": 27.078947067260742, "blob_id": "702bf8f0d8c8e35a1dcbdd00ab70d1ffc345eac7", "content_id": "558183bfd4762af84f06759347e58761d807cc71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1066, "license_type": "no_license", "max_line_length": 97, "num_lines": 38, "path": "/cycles/views.py", "repo_name": "nithin9296/awitha", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom .forms import profitandlossform, balancesheetform\nfrom django.db import connection\nfrom .models import profitandloss, balancesheet\n\n\n\ndef importdata_view(request):\n\n\tif request.method == 'POST':\n\t\tform = profitandlossform(request.POST, prefix=\"plf\")\n\t\tsub_form = balancesheetform(request.POST, prefix=\"bsf\")\n\n\t\tif form.is_valid() and sub_form.is_valid:\n\t\t\tplf = form.save\n\t\t\tbsf = sub_form.save(plf)\n\n\t\t\treturn redirect(my_custom_sql)\n\telse:\n\t\tform = profitandlossform(prefix=\"plf\")\n\t\tsub_form = balancesheetform(prefix=\"bsf\")\n\n\treturn render(request, \"prelim_analysis.html\", {'form':form, 'sub_form':sub_form})\n\n\ndef my_custom_sql(request):\n\tcursor = connection.cursor()\n\tcursor.execute('''SELECT p.documentid, b.glcode, b.gldescriptions, b.amount FROM profitandloss p\n\t\t\t\t\tJOIN balancesheet b\n\t\t\t\t\tON p.documentid = b.documentid''')\n\tids = []\n\tfor row in cursor.fetchall():\n\t\tid = row[0]\n\t\tids.append(id)\n\tcontext = {\"rows\": ids}\n\n\treturn render (request, \"sql.html\", context)" }, { "alpha_fraction": 0.575406014919281, "alphanum_fraction": 0.7378190159797668, "avg_line_length": 14.962963104248047, "blob_id": "b517a0659383b2b5a6563070447587b931e00fc5", "content_id": "c860e123f2cd1af89ee6ed5c0ac0831da8af16f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 431, "license_type": "no_license", "max_line_length": 27, "num_lines": 27, "path": "/requirements.txt", "repo_name": "nithin9296/awitha", "src_encoding": "UTF-8", "text": "diff-match-patch==20121119\ndj-database-url==0.5.0\nDjango==1.11.5\ndjango-crispy-forms==1.7.1\ndjango-import-export==1.0.0\ndjangorestframework==3.7.7\net-xmlfile==1.0.1\ngunicorn==19.7.1\njdcal==1.3\nmysqlclient==1.3.12\nodfpy==1.3.6\nopenpyxl==2.5.0\nPillow==5.0.0\npsycopg2-binary\npytz==2018.3\nPyYAML==3.12\ntablib==0.12.1\nunicodecsv==0.14.1\nxlrd==1.1.0\nxlwt==1.3.0\nboto \nboto3 \ndjango-storages\ndjango_excel\nsorl-thumbnail\npandas\nxlsxwriter\n" }, { "alpha_fraction": 0.762499988079071, "alphanum_fraction": 0.762499988079071, "avg_line_length": 25.733333587646484, "blob_id": "e9363d66d437602976d138eb13f9353c3230610f", "content_id": "d7bc93ef6cf7579225026dc1f26d091b4473d4d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 400, "license_type": "no_license", "max_line_length": 63, "num_lines": 15, "path": "/cycles/forms.py", "repo_name": "nithin9296/awitha", "src_encoding": "UTF-8", "text": "from django import forms\nfrom django.forms import ModelChoiceField, ModelForm\n\nfrom .models import profitandloss, balancesheet\n\nclass profitandlossform(ModelForm):\n\tclass Meta:\n\t\tmodel = profitandloss\n\t\tfields = ('documentid', 'glcode', 'gldescription', 
'amount')\n\n\nclass balancesheetform(ModelForm):\n\tclass Meta:\n\t\tmodel = balancesheet\n\t\tfields = ['documentid', 'glcode', 'gldescriptions', 'amount']" }, { "alpha_fraction": 0.5100671052932739, "alphanum_fraction": 0.5154362320899963, "avg_line_length": 28.84000015258789, "blob_id": "092b30ad413107d0ea131e801c172b65aa07db7e", "content_id": "533854671f3783e15ebd4e7cb13d899cfd76fe21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 745, "license_type": "no_license", "max_line_length": 113, "num_lines": 25, "path": "/operating/templates/operating/login.html", "repo_name": "nithin9296/awitha", "src_encoding": "UTF-8", "text": "{% extends 'base.html' %}\n\n{% block content %}\n <h2>Login</h2>\n <form method=\"post\">\n {% csrf_token %}\n {% for field in form %}\n <p>\n {{ field.label_tag }}<br>\n {{ field }}\n {% if field.help_text %}\n <small style=\"color: grey\">{{ field.help_text }}</small>\n {% endif %}\n {% for error in field.errors %}\n <p style=\"color: red\">{{ error }}</p>\n {% endfor %}\n </p>\n {% endfor %}\n <a class=\"btn btn-primary btn-lg\" href=\"/operating/options/\"<button type=\"submit\">Login &raquo;</button> </a>\n </form>\n\n\n <h4> New to Audit Analytics ? </h4>\n <p><a class=\"btn btn-primary btn-lg\" href=\"/operating/signup/\" role=\"button\">signup &raquo;</a></p>\n{% endblock %}" }, { "alpha_fraction": 0.6297739744186401, "alphanum_fraction": 0.6352299451828003, "avg_line_length": 43.2068977355957, "blob_id": "72e509eef9917f57ff469e57105a64e7cea4a54d", "content_id": "d09f6305ef6853ac4be4faeae22275b08ff20cee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1283, "license_type": "no_license", "max_line_length": 218, "num_lines": 29, "path": "/operating/urls.py", "repo_name": "nithin9296/awitha", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\n\n\n\nfrom .views import npmargin_detail_view, login_page, ChartData, ChartData_op2, npmargin_view, signup, trialbalanceanalysis, rationop1, options, productview, qs_to_local_csv, CSVDownloadView, DataView, about, feedback\n\nurlpatterns = [\n #url(r'^$', npmarginListView.as_view(), name='npmargin_changelist'),\n #url(r'^$', HomeView.as_view(), name='home'),\n url(r'^npmargin/$', npmargin_view),\n #url(r'^compare/$', npmargin_compare),\n url(r'^login/$', login_page),\n url(r'^signup/$', signup, name='signup'),\n url(r'^trialbalanceanalysis/$', trialbalanceanalysis),\n\n #url(r'^add/$', npmarginListView.as_view(), name='list'),\n url(r'^(?P<pk>\\d+)/$', npmargin_detail_view),\n url(r'^api/chart/data/$', ChartData.as_view(), name='api-data'),\n url(r'^api/chart/data2/$', ChartData_op2.as_view(), name='api-data2'),\n #url(r'^operating/(?P<pk>\\d+)/$', npmarginDetailView.as_view()),\n #url(r'^handson_view/', handson_table, name=\"handson_view\"),\n url(r'^rationop1/$', rationop1),\n url(r'^options/$', options),\n url(r'^about/$', about),\n url(r'^feedback/$', feedback),\n url(r'^productview/$', productview),\n url(r'^qs_to_local_csv/$', qs_to_local_csv),\n url(r'^DataView/$', DataView.as_view()),\n ]\n\n" }, { "alpha_fraction": 0.5444928407669067, "alphanum_fraction": 0.5653391480445862, "avg_line_length": 43.63888931274414, "blob_id": "cdaa1bed83ff93a973626609ffcfed9493584b63", "content_id": "0c150cae0c02f92997cbe353bcb832d33de07555", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3214, "license_type": "no_license", "max_line_length": 
232, "num_lines": 72, "path": "/operating/migrations/0001_initial.py", "repo_name": "nithin9296/awitha", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.5 on 2018-02-11 14:54\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='CompanyNetPercentage',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('region', models.CharField(choices=[('United States', 'United States'), ('Aus, NZ & Canada', 'Aus, NZ & Canada'), ('Europe', 'Europe'), ('Emerging Markets', 'Emerging Markets'), ('Japan', 'Japan')], max_length=25)),\n ('industry', models.CharField(choices=[('Advertising', 'Advertising'), ('aerospace', 'aerospace'), ('air transport', 'air transport'), ('apparel', 'apparel')], max_length=25)),\n ('npy2015', models.FloatField()),\n ('npy2016', models.FloatField()),\n ('npy2017', models.FloatField()),\n ],\n ),\n migrations.CreateModel(\n name='Industry',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('industry_name', models.CharField(max_length=25)),\n ],\n ),\n migrations.CreateModel(\n name='npmargin',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('npy2015', models.IntegerField(default=0)),\n ('npy2016', models.IntegerField(default=0)),\n ('npy2017', models.IntegerField(default=0)),\n ('industry', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='operating.Industry')),\n ],\n ),\n migrations.CreateModel(\n name='operatingmargin',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('opy2015', models.IntegerField(default=0)),\n ('opy2016', models.IntegerField(default=0)),\n ('opy2017', models.IntegerField(default=0)),\n ('industry', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='operating.Industry')),\n ],\n ),\n migrations.CreateModel(\n name='Region',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('region_name', models.CharField(max_length=20)),\n ],\n ),\n migrations.AddField(\n model_name='operatingmargin',\n name='region',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='operating.Region'),\n ),\n migrations.AddField(\n model_name='npmargin',\n name='region',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='operating.Region'),\n ),\n ]\n" }, { "alpha_fraction": 0.6907792091369629, "alphanum_fraction": 0.7085431814193726, "avg_line_length": 35.01980209350586, "blob_id": "b79ae086a47c42501534e5d20bf3878df8b80a7a", "content_id": "48d58ab3898f711ec55d33d64f66159c2981d2db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10921, "license_type": "no_license", "max_line_length": 88, "num_lines": 303, "path": "/operating/models.py", "repo_name": "nithin9296/awitha", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth import authenticate, login, get_user_model\nfrom django.conf import settings\nfrom django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.contrib.contenttypes.models import 
ContentType\nfrom io import StringIO\nfrom django.core.files import File\nfrom django.db import models\nfrom django.utils import timezone\nfrom operating.utils import convert_to_dataframe\nfrom .signals import object_viewed_signal\nfrom .utils import get_client_ip\n\n# Create your models here.\n# class npmarginManager(models.Manager):\n# \tdef all(self):\n# \t\treturn self.get_queryset()\n\n# \tdef get_by_id(self, id):\n# \t\tqs = self.get_queryset().filter(id=id)\n# \t\tif qs.count() == 1:\n# \t\t\treturn qs.first()\n# \t\treturn None\n\nclass Region(models.Model):\n\tregion_name = models.CharField(max_length=20)\n\n\tdef __str__(self):\n\t\treturn str(self.region_name)\n\nclass Industry(models.Model):\n\t\n\tindustry_name = models.CharField(max_length=25)\n\n\tdef __str__(self):\n\t\treturn str(self.industry_name)\n\n\nclass UserName(models.Model):\n\tuser_name = models.CharField(max_length=25)\n\n\tdef __str__(self):\n\t\treturn self.user_name\n\n\nclass npmargin(models.Model):\n\tregion = models.ForeignKey(Region, on_delete=models.CASCADE, null=True)\n\tindustry = models.ForeignKey(Industry, on_delete=models.CASCADE, null=True)\n\tnetmargin_percentage_2015 = models.DecimalField(max_digits=5, decimal_places=2)\n\tnetmargin_percentage_2016 = models.DecimalField(max_digits=5, decimal_places=2)\n\tnetmargin_percentage_2017 = models.DecimalField(max_digits=5, decimal_places=2)\n\tgross_profit_margin_2015 = models.DecimalField(max_digits=5, decimal_places=2)\n\tgross_profit_margin_2016 = models.DecimalField(max_digits=5, decimal_places=2)\n\tgross_profit_margin_2017 = models.DecimalField(max_digits=5, decimal_places=2)\n\tdebtequity_ratio_2015 = models.DecimalField(max_digits=5, decimal_places=2)\t\n\tdebtequity_ratio_2016 = models.DecimalField(max_digits=5, decimal_places=2)\t\n\tdebtequity_ratio_2017 = models.DecimalField(max_digits=5, decimal_places=2)\t\n\n\tdef __str__(self):\n\t\treturn str(self.industry)\n\n\n\n\t#objects = npmarginManager()\n\n\t#def get_absolute_url(self):\n\t\t#return \"/operating/{slug}/\".format(slug=self.slug)\n\n\nREGION_CHOICES=[\n\t('United States', 'United States'),\n\t('Aus, NZ & Canada', 'Aus, NZ & Canada'),\n\t('Europe', 'Europe'),\n\t('Emerging Markets', 'Emerging Markets'),\n\t('Japan','Japan'),\n\t]\n\nINDUSTRY_CHOICES=[\n\t('Advertising', 'Advertising'),\n\t('aerospace', 'aerospace'),\n\t('air transport', 'air transport'),\n\t('apparel', 'apparel'),\n\t('Auto & Truck', 'Auto & Truck'),\n\t('Auto Parts', 'Auto Parts'),\n\t('Utility (Water)', 'Utility (Water)'),\n\t('Utility (General)', 'Utility (General)'),\n\t('Trucking', 'Trucking'),\n\t('Transportation (Railroads)', 'Transportation (Railroads)'),\n\t('Transportation', 'Transportation'),\n\t('Tobacco', 'Tobacco'),\n\t('Telecom. Services', 'Telecom. Services'),\n\t('Telecom. Equipment', 'Telecom. 
Equipment'),\n\t('Telecom (Wireless)', 'Telecom (Wireless)'),\n\t('Steel', 'Steel'),\n\t('Software (System & Application)', 'Software (System & Application)'),\n\t('Software (Internet)', 'Software (Internet)'),\n\t('Software (Entertainment)', 'Software (Entertainment)'),\n\t('Shoe', 'Shoe'),\n\t('Shipbuilding & Marine', 'Shipbuilding & Marine'),\n\t('Semiconductor Equip', 'Semiconductor Equip'),\n ('Semiconductor', 'Semiconductor'),\n\t('Rubber& Tires', 'Rubber& Tires'),\n\t('Retail (Special Lines)','Retail (Special Lines)'),\n\t('Retail (Online)', 'Retail (Online)'), \n\t('Retail (Grocery and Food)', 'Retail (Grocery and Food)'),\n\t('Retail (General)', 'Retail (General)'),\n\t('Retail (Distributors)', 'Retail (Distributors)'),\n\t('Retail (Building Supply)', 'Retail (Building Supply)'),\n\t('Retail (Automotive)', 'Retail (Automotive)'),\n\t('Restaurant/Dining', 'Restaurant/Dining'),\n\t('Reinsurance', 'Reinsurance'),\n\t('Recreation', 'Recreation'),\n\t('Real Estate (Operations & Services)', 'Real Estate (Operations & Services)'),\n\t('Real Estate (General/Diversified)', 'Real Estate (General/Diversified)'),\n\t('Real Estate (Development)', 'Real Estate (Development)'),\n\t('R.E.I.T.', 'R.E.I.T.'),\n\t('Publishing & Newspapers', 'Publishing & Newspapers'),\n\t('Precious Metals', 'Precious Metals'),\n\t('Power','Power'), \n\t('Paper/Forest Products', 'Paper/Forest Products'),\n\t('Packaging & Container', 'Packaging & Container'),\n\t('Oilfield Svcs/Equip.', 'Oilfield Svcs/Equip.'),\n\t('Oil/Gas Distribution', 'Oil/Gas Distribution'),\n\t('Oil/Gas (Production and Exploration)', 'Oil/Gas (Production and Exploration)'),\n\t('Oil/Gas (Integrated)', 'Oil/Gas (Integrated)'),\n\t('Office Equipment & Services', 'Office Equipment & Services'),\n\t('Metals & Mining', 'Metals & Mining'),\n\t('Machinery', 'Machinery'),\n\t('Investments & Asset Management', 'Investments & Asset Management'),\n\t('Insurance (Prop/Cas.)', 'Insurance (Prop/Cas.)'),\n\t('Insurance (Life)', 'Insurance (Life)'),\n\t('Insurance (General)', 'Insurance (General)'),\n\t('Information Services', 'Information Services'),\n\t('Household Products', 'Household Products'),\n\t('Hotel/Gaming', 'Hotel/Gaming'),\n\t('Hospitals/Healthcare Facilities', 'Hospitals/Healthcare Facilities'),\n\t('Homebuilding', 'Homebuilding'),\n\t('Heathcare Information and Technology', 'Heathcare Information and Technology'),\n\t('Healthcare Support Services', 'Healthcare Support Services'), \n\t('Healthcare Products', 'Healthcare Products'),\n\t('Green & Renewable Energy', 'Green & Renewable Energy'),\n\t('Furn/Home Furnishings', 'Furn/Home Furnishings'),\n\t('Food Wholesalers', 'Food Wholesalers'),\n\t('Food Processing', 'Food Processing'),\n# Financial Svcs. 
(Non-bank & Insurance)\n# Farming/Agriculture\n# Environmental & Waste Services\n# Entertainment\n# Engineering/Construction\n# Electronics (General)\n# Electronics (Consumer & Office)\n# Electrical Equipment\n# Education\n# Drugs (Pharmaceutical)\n# Drugs (Biotechnology)\n# Diversified\n# Construction Supplies\n# Computers/Peripherals\n# Computer Services\n# Coal & Related Energy\n# Chemical (Specialty)\n# Chemical (Diversified)\n# Chemical (Basic)\n# Cable TV\n# Business & Consumer Servi\n# Building Materials\n# Brokerage & Investment Ba\n# Broadcasting\n# Beverage (Soft)\n# Beverage (Alcoholic)\n# Banks (Regional)\n# Bank (Money Center)\n# )\n\n\t]\n\n\n\n#User = get_user_model()\nclass CompanyNetPercentage(models.Model):\n\tuser = models.CharField(max_length=25)\n\t#user \t = models.ForeignKey(UserName, on_delete=models.CASCADE, null=True)\n\tregion = models.CharField(max_length=25, choices=REGION_CHOICES)\n\tindustry = models.CharField(max_length=25, choices=INDUSTRY_CHOICES)\n\t#industry = forms.ModelChoiceield(queryset= Industry.objects.values_list('name'))\n\tgross_profit_margin_2015 = models.IntegerField(default=0)\n\tgross_profit_margin_2016 = models.IntegerField(default=0)\n\tgross_profit_margin_2017 = models.IntegerField(default=0)\n\tdebtequity_ratio_2015 = models.IntegerField(default=0)\n\tdebtequity_ratio_2016 = models.IntegerField(default=0)\n\tdebtequity_ratio_2017 = models.IntegerField(default=0)\n\tnetmargin_percentage_2015 = models.DecimalField(max_digits=5, decimal_places=2)\n\tnetmargin_percentage_2016 = models.DecimalField(max_digits=5, decimal_places=2)\n\tnetmargin_percentage_2017 = models.DecimalField(max_digits=5, decimal_places=2)\n\n\tdef __str__(self):\n\t\treturn str(self.user)\n\n\tdef clean_user(self):\n\t\t\tuser = self.cleaned_data.get('user')\n\t\t\tqs = CompanyNetPercentage.objects.filter(user=user)\n\t\t\tif qs.exists():\n\t\t\t\traise forms.ValidationError(\"User already exists\")\n\t\t\treturn user\n\n# class operatingmargin(models.Model):\n# \tregion = models.ForeignKey(Region, on_delete=models.CASCADE, null=True)\n# \tindustry = models.ForeignKey(Industry, on_delete=models.CASCADE, null=True)\n# \topy2015 = models.IntegerField(default=0)\n# \topy2016 = models.IntegerField(default=0)\n# \topy2017 = models.IntegerField(default=0)\n\n# \tdef __str__(self):\n# \t\treturn str(self.industry)\n\n\n\nclass trialbalance(models.Model):\n\tuser = models.CharField(max_length=25, default=0)\n\tregion = models.CharField(max_length=25, choices=REGION_CHOICES, default=0)\n\tindustry = models.CharField(max_length=25, choices=INDUSTRY_CHOICES, default=0)\n\tglcode = models.IntegerField(default=0)\n\tgldescription = models.CharField(max_length=60)\n\tclassification = models.CharField(max_length=30)\n\tsubclassification = models.CharField(max_length=30)\n\tdebit_2017 = models.IntegerField(default=0)\n\tcredit_2017 = models.IntegerField(default=0)\n\tdebit_2016 = models.IntegerField(default=0)\n\tcredit_2016 = models.IntegerField(default=0)\n\tdebit_2015 = models.IntegerField(default=0)\n\tcredit_2015 = models.IntegerField(default=0)\n\n\tdef __str__(self):\n\t\treturn str(self.gldescription)\n\n\n\n\nclass ObjectViewed(models.Model):\n\tuser = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True)\n\tcontent_type = models.ForeignKey(ContentType, on_delete=models.SET_NULL, null=True)\n\tobject_id = models.PositiveIntegerField()\n\tip_address = models.CharField(max_length=120, blank=True, null=True)\n\tcontent_object = GenericForeignKey('content_type', 
'object_id')\n\ttimestamp = models.DateTimeField(auto_now_add=True)\n\n\tdef __str__(self, ):\n\t\treturn \"%s viewed: %s\" %(self.content_object, self.timestamp)\n\n\tclass Meta:\n\t\tordering = ['-timestamp']\n\t\tverbose_name = 'Object Viewed'\n\t\tverbose_name_plural = 'Objects Viewed'\n\ndef object_viewed_recevier(sender, instance, request, *args, **kwargs):\n\tc_type = ContentType.get_for_model(sender)\n\tip_adress = None\n\ttry:\n\t\tip_adress = get_client_ip(request)\n\texcept:\n\t\tpass\n\tnew_view_instance = ObjectViewed.objects.create(\n\t\t\t\tuser=request.user,\n\t\t\t\tcontent_type=c_type,\n\t\t\t\tobject_id=instance.id,\n\t\t\t\tip_address=ip_address\n\t\t\t\t)\nobject_viewed_signal.connect(object_viewed_recevier)\n\n\nclass DataSetManager(models.Manager):\n\tdef create_new(self, qs, fields=None):\n\t\tdf = convert_to_dataframe(qs, fields=fields)\n\t\tfp = StringIO()\n\t\tfp.write(df.to_csv())\n\t\tdate = timezone.now().strftime(\"%m-%d-%y\")\n\t\tmodel_name = slugify(qs.model.__name__)\n\t\tfilename = \"{}-{}.csv\".format(model_name, date)\n\t\tobj = self.model(\n\t\t\tname = filename.replace('.csv', ''),\n app = slugify(qs.model._meta.app_label),\n model = qs.model.__name__,\n lables = fields,\n object_count = qs.count()\n )\n\t\tobj.save()\n\t\tobj.csvfile.save(filename, File(fp)) #saves file to the file field\n\t\treturn obj\n\nclass DatasetModel(models.Model):\n name = models.CharField(max_length=120)\n app = models.CharField(max_length=120, null=True, blank=True)\n model = models.CharField(max_length=120, null=True, blank=True)\n lables = models.TextField(null=True, blank=True)\n object_count = models.IntegerField(default=0)\n csvfile = models.FileField(upload_to='datasets/', null=True, blank=True)\n timestamp = models.DateTimeField(auto_now_add=True)\n\n\n\n\nclass feedback(models.Model):\n\tfeedback = models.CharField(max_length=10000)\n\n\n\n\n\n\n\n" } ]
14
AashishMehtoliya/titanic
https://github.com/AashishMehtoliya/titanic
2bcf9f038f211270621006b7280b887539686466
1b1f95490ba85aff21034471df357f06e688da6b
908967fbd79694bb24a446552e85686d1e36848c
refs/heads/master
2020-04-20T02:24:24.709210
2019-05-30T06:05:52
2019-05-30T06:05:52
168,570,772
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6256157755851746, "alphanum_fraction": 0.6631773114204407, "avg_line_length": 31.258277893066406, "blob_id": "5569c9a17d67b0989b85552d0c079d4781c24992", "content_id": "5b5a96927c8fa9e5a1b92c5b027744a218c5d941", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4872, "license_type": "no_license", "max_line_length": 96, "num_lines": 151, "path": "/titan_pro.py", "repo_name": "AashishMehtoliya/titanic", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 7 15:45:54 2018\n\n@author: Aashish Mehtoliya\n\"\"\"\n\nimport pandas as pd\n#import matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.svm import SVC\nimport seaborn as sns\nsns.set()\n\n#load the data\ndf_train = pd.read_csv('train.csv')\ndf_test = pd.read_csv('test.csv')\n\n\n#analysing and altering data\ndf_train_test = [df_train,df_test]\n\nfor dataset in df_train_test:\n dataset['Title'] = dataset['Name'].str.extract('([A-Za-z]+)\\.'\n ,expand=False)\ntitle_mapping={'Mr':0,'Miss':1,'Mrs':2,'Master':3,\n 'Dr':3,'Rev':3,'Major':3,'Mile':3,'Col':3,\n 'Ms':3,'Capt':3,'Jonkheer':3,'Lady':3,'Sir':3,\n 'Don':3,'Mme':3,'Countless':3}\nfor dataset in df_train_test:\n dataset['Title']=dataset['Title'].map(title_mapping)\n \ndf_train['Title'].fillna(0,inplace=True)\ndf_test['Title'].fillna(1,inplace=True)\n \nsex_mapping={'male':0,'female':1}\nfor dataset in df_train_test:\n dataset['Sex']=dataset['Sex'].map(sex_mapping)\n \ndef bar_chart(feature):\n Survived=df_train[df_train['Survived']==1][feature].values\n Dead=df_train[df_train['Survived']==0][feature].values\n df = pd.DataFrame([Survived,Dead])\n df.index=['Survived','Dead']\n df.plot(kind='bar',stacked=True,figsize=(10,5))\n \ndf_train['Age'].fillna(df_train.groupby('Title')['Age'].transform('median'),inplace=True)\ndf_test['Age'].fillna(df_test.groupby('Title')['Age'].transform('median'),inplace=True) \n\nfor dataset in df_train_test:\n dataset.loc[dataset['Age']<=16,'Age']=0\n dataset.loc[(dataset['Age']>16) & (dataset['Age']<=26),'Age']=1\n dataset.loc[(dataset['Age']>26) & (dataset['Age']<=36),'Age']=2\n dataset.loc[(dataset['Age']>36) & (dataset['Age']<=62),'Age']=3\n dataset.loc[dataset['Age']>62,'Age']=4\n\nfor dataset in df_train_test:\n dataset['Embarked']=dataset['Embarked'].fillna('S')\n \nembarked_mapping = {'S':0, 'C':1,'Q':2}\nfor dataset in df_train_test:\n dataset['Embarked']=dataset['Embarked'].map(embarked_mapping)\n\ndf_train['Fare'].fillna(df_train.groupby('Pclass')['Fare'].transform('median'),inplace=True)\ndf_test['Fare'].fillna(df_test.groupby('Pclass')['Fare'].transform('median'),inplace=True)\n\n\n\nfor dataset in df_train_test:\n dataset.loc[dataset['Fare']<=16,'Fare']=0\n dataset.loc[(dataset['Fare']>17) & (dataset['Fare']<=30),'Fare']=1\n dataset.loc[(dataset['Fare']>30) & (dataset['Fare']<=100),'Fare']=2\n dataset.loc[dataset['Fare']>100,'Fare']=4\n \nfor dataset in df_train_test:\n dataset['Cabin']=dataset['Cabin'].str[:1]\n \ncabin_mapping={'A':0.5,'B':1.0,'C':1.5,'D':2,'E':2.5,'F':3,'G':3.5,'T':4}\nfor dataset in df_train_test:\n dataset['Cabin']= dataset['Cabin'].map(cabin_mapping)\n 
\ndf_train['Cabin'].fillna(df_train.groupby('Pclass')['Cabin'].transform('median'),inplace=True)\ndf_test['Cabin'].fillna(df_test.groupby('Pclass')['Cabin'].transform('median'),inplace=True)\n\n\n#feature engineering\ndf_train['FamilySize']=df_train['SibSp']+df_train['Parch']+1\ndf_test['FamilySize']=df_test['SibSp']+df_test['Parch']+1\n\nfamily_mapping={1: 0, 2: 0.5, 3: 1, 4: 1.5, 5: 2, 6: 2.5, 7: 3.0, 8: 3.5, 9: 4, 10: 4.5, 11: 5 }\n\nfor dataset in df_train_test:\n dataset['FamilySize']=dataset['FamilySize'].map(family_mapping)\n \nfeature_drop=['Parch','SibSp','Ticket','PassengerId','Name']\ndf_train=df_train.drop(feature_drop,axis=1)\ndf_test=df_test.drop(feature_drop,axis=1)\n\n\nx=df_train.iloc[:,[1,2,3,4,5,6,7,8]]\ny=df_train.iloc[:,0]\n\n\nknn = KNeighborsClassifier(n_neighbors=13)\nknn.fit(x,y)\ny_pred=knn.predict(df_test.iloc[:,:].values)\norig= pd.read_csv('test.csv')\norig=orig.iloc[:,0:1]\norig[\"Survived\"]=y_pred[0:]\norig.to_csv(\"Submit.csv\",header=True,index=False)\nans=pd.read_csv(\"Submit.csv\")\n\n\n#prediction by decision tree\ndes_tree = DecisionTreeClassifier(criterion='gini')\ndes_tree.fit(x,y)\n\ny_pred_1=des_tree.predict(df_test.iloc[:,:].values)\norig_1=pd.read_csv('test.csv')\norig_1=orig_1.iloc[:,0:1]\norig_1[\"Survived\"]=y_pred_1[0:]\norig_1.to_csv(\"Submit_1.csv\",header=True,index=False)\nans_1=pd.read_csv(\"Submit_1.csv\")\n\n\n\nLRR = LogisticRegression()\nLRR.fit(x,y)\n\ny_pred_2=LRR.predict(df_test.iloc[:,:].values)\norig_2=pd.read_csv('test.csv')\norig_2=orig_2.iloc[:,0:1]\norig_2[\"Survived\"]=y_pred_2[0:]\norig_2.to_csv(\"Submit_2.csv\",header=True,index=False)\nans_2=pd.read_csv(\"Submit_2.csv\")\n\n\nRfc = RandomForestClassifier(n_estimators =8,random_state=0)\nRfc.fit(x, y)\n\ny_pred_3=Rfc.predict(df_test.iloc[:,:].values)\norig_3=pd.read_csv('test.csv')\norig_3=orig_3.iloc[:,0:1]\norig_3[\"Survived\"]=y_pred_3[0:]\norig_3.to_csv(\"Submit_3.csv\",header=True,index=False)\nans_3=pd.read_csv(\"Submit_3.csv\")\n\n" } ]
1
denrsch/home_terminal
https://github.com/denrsch/home_terminal
639fbf251358a8225f4d6c1a5d9bfb366fa26e2e
af1a6ba0475c2a101647ae10545f867a32cd9f76
77e02d9154dd929f7c931729bf37749955274633
refs/heads/master
2020-12-24T14:45:40.471972
2015-04-03T00:09:23
2015-04-03T00:09:23
33,276,082
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7161904573440552, "alphanum_fraction": 0.7161904573440552, "avg_line_length": 24.047618865966797, "blob_id": "c603fa316c7660f7cd1e759413bf5960a8c0be77", "content_id": "66ee11324252cc77fffac09a7f37d7c8ab53cad9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 525, "license_type": "no_license", "max_line_length": 65, "num_lines": 21, "path": "/Circle_calculations.py", "repo_name": "denrsch/home_terminal", "src_encoding": "UTF-8", "text": "#calculating the area of a circle with input requests\n\nimport math \nradius_input = input(\"What size is the radius? \")\nr = int(radius_input)\n\ncircle = math.pi * r * r\n\nprint \"The area of the circle is %s\" % (circle)\n\n#calculating the area of a rectangle with a user prompt\n\nwidth_input = input(\"how wide is your rectangle? \")\nwidth = int(width_input)\n\nlength_input = input(\"how long is your rectangle? \")\nlength = int(length_input)\n\nrectangle = length * width\n\nprint \"looks like the area of your rectangle is %s\" % (rectangle)" }, { "alpha_fraction": 0.8199999928474426, "alphanum_fraction": 0.8199999928474426, "avg_line_length": 24, "blob_id": "43e29834c9dc2e6707e5d9ec1b524b2a45f7db58", "content_id": "0282bde35b1b0fdf20a62070ac15bd47b8c29fac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 50, "license_type": "no_license", "max_line_length": 33, "num_lines": 2, "path": "/README.md", "repo_name": "denrsch/home_terminal", "src_encoding": "UTF-8", "text": "# home_terminal\nPrograms through my Text Wrangler\n" }, { "alpha_fraction": 0.6380090713500977, "alphanum_fraction": 0.6832579374313354, "avg_line_length": 21.200000762939453, "blob_id": "17855b2a5197808ace6657ca65c043f54abd16f9", "content_id": "a81a6ca17c8b28904353a7cbe73420a8a58d74f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 221, "license_type": "no_license", "max_line_length": 68, "num_lines": 10, "path": "/thinkinglikecomputerscientist1.py", "repo_name": "denrsch/home_terminal", "src_encoding": "UTF-8", "text": "#Compounding Interest Assignment\n\np = 1000\nn = 12\nr = 0.08\ntime_lapse = input(\"please enter the time period you want to use. \")\nt = int(time_lapse)\ncompounded_interest = p * (1 + (r/n)) ** (n*t)\n\nprint compounded_interest" } ]
3
Skuldur/somnus
https://github.com/Skuldur/somnus
bd759794fbeb8b562a0863aafcc31a8ce47ad246
45cec3e2ed3f27e2a024ea214651af30a0e6fe60
d9082b1c64096f3d39b0d037a5c23386d510f235
refs/heads/master
2022-12-01T21:18:40.765549
2020-08-18T21:22:57
2020-08-18T21:22:57
279,154,687
5
1
MIT
2020-07-12T21:57:01
2020-08-15T08:26:00
2020-08-17T21:14:39
Python
[ { "alpha_fraction": 0.7017144560813904, "alphanum_fraction": 0.7137690782546997, "avg_line_length": 71.46601867675781, "blob_id": "6a5ccaf4961fe039969baf3300b95f2b61d0d53c", "content_id": "6e0380c96974f00f87dbecbad4b00f271da53208", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 7466, "license_type": "permissive", "max_line_length": 578, "num_lines": 103, "path": "/README.md", "repo_name": "Skuldur/somnus", "src_encoding": "UTF-8", "text": "# Somnus\n\n![Build](https://github.com/skuldur/somnus/workflows/build/badge.svg)\n![PyPI - License](https://img.shields.io/pypi/l/somnus)\n![GitHub release (latest SemVer)](https://img.shields.io/github/v/release/skuldur/somnus)\n\nSomnus offers easy keyword detection for everyone. It allows you to listen for and detect a specific keyword in a continuous stream of audio data. It uses keyword detection models developed by Google and Baidu to detect instances of the keyword and by using these small-footprint models Somnus keeps memory usage and latency to a minimum.\n\n## Getting started\n\n### Prerequisites\n\n#### Linux\n\n```bash\nsudo apt-get install portaudio19-dev python-pyaudio python3-pyaudio\n```\n\n#### Windows 10\n\nYou need to install Microsoft C++ Build Tools before you can install Somnus.\n\n### Installation\n\nUse the package manager [pip](https://pip.pypa.io/en/stable/) to install the Somnus package and the CLI\n\n```bash\npip install somnus\n```\n\n## Quickstart\n\nSomnus makes it simple to go from raw audio recordings to a working keyword detection model. To get started create a few recordings of yourself saying the keyword and download the datasets in the [Recommended datasets section](#recommended-datasets). Move the files to the raw audio directory you specify by running `somnus configure`. \n\nNow that you have your raw audio files set up, you can use our default configurations to create a highly effective keyword detection model.\n\n1. Run `somnus augment_audio` to augment the audio files with background noise and create your audio dataset\n2. Run `somnus preprocess` to normalize the data stored in the augmented audio files and create a dataset that's been prepared for our keyword detection models\n3. Run `somnus train --epochs 10` to train a keyword detection model using the dataset you just created. The resulting model will be saved to `saved_model.h5` in your current working directory.\n4. Run `somnus test` to test the accuracy of the model you just trained using a test dataset that was generated by the `preprocess` command.\n\nNow that you have a trained model you can use the Somnus client to detect a keyword using your microphone. First run `somnus list_microphones` to find the device index of your microphone. Then run the following test script using your microphone's device index and verify that the keyword detection is working.\n\n```python\nfrom somnus.somnus import Somnus\n\ns = Somnus(model='./saved_model.h5', device_index=1)\nactivated = s.listen()\n\nif activated:\n\tprint('You did it!')\nelse:\n\tprint('Something went wrong!')\n```\n\n## Usage\n\n### Somnus\n\nSomnus can be used to listen for an instance of a selected keyword in a continuous stream of audio data from a single channel from a microphone. 
To find the device index of your microphone run `somnus list_microphones`.\n\nSomnus can handle all the audio interfacing for you so that you only need to initialize Somnus and and call the `listen()` and it will start listening to your microphone until it detects the keyword. Somnus also offers a nonblocking method (`detect_keyword()`) that allows the user to process the audio themselves and only use Somnus to detect a keyword in an audio time series passed to `detect_keyword()` as an argument.\n\n**Parameters**\n* **model (default: '')**: The relative or absolute path to a Keras model file for the keyword model.\n* **device_index (default: 0)**: The device index of the microphone that Somnus should listen to.\n* **threshold (default: 0.5)**: A threshold for how confident Somnus has to be for it to detect the keyword\n* **audio_config**: A dictionary containing the configuration specific to the audio time series. It contains the following:\n\t* **data_shape (default: (101, 40, 1))**: The input shape for the keyword model\n\t* **sample_duration (default: 1)**: How long the input of the keyword model should be in seconds\n\t* **n_filters (default: 40)**: The number of filters in each frame\n\t* **win_length (default: 400)**: The length of each window in frames\n\t* **win_hop (default: 160)**: the number of frames between the starting frame of each consecutive window.\n\n## CLI\n\n[The Somnus CLI Documentation](https://github.com/Skuldur/somnus/blob/master/cli/README.md)\n\n## Models\n\nCurrently Somnus offers the choice between the following models:\n\n| Name | Original paper | Description | Total parameters | Size |\n|----------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------|-------------------------|-----------|\n| cnn-one-stride | [Convolutional Neural Networks for Small-footprint Keyword Spotting](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/43969.pdf) | A frequency strided convolutional model with a stride of 4 and no pooling | 381k | 1.5MB |\n| cnn-trad-pool | [Convolutional Neural Networks for Small-footprint Keyword Spotting](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/43969.pdf) | A keyword detection model with two convolutional layers followed by max pooling | 649k | 2.5MB |\n| crnn-time-stride | [Convolutional Recurrent Neural Networks for Small-Footprint Keyword Spotting](https://arxiv.org/ftp/arxiv/papers/1703/1703.05390.pdf) | A convolutional recurrent network with time striding | 88k | 380KB |\n\n## Recommended datasets\n\nBefore you start we highly recommend downloading pre-made datasets for both the negative examples and background noise. For negative examples we recommend the [Librispeech](http://www.openslr.org/12/) dataset. You can pick any of the dev, test, or train datasets. To start with we recommend using the `train-clean-100.tar.gz` dataset and moving on to the larger datasets if needed. 
For background noise we recommend the [DEMAND](https://asa.scitation.org/doi/abs/10.1121/1.4799597) dataset that you can download from Kaggle [here](https://www.kaggle.com/aanhari/demand-dataset).\n\nExtract the data and move the Librispeech dataset to the raw audio directory and place it in the `negatives/` sub-directory and the DEMAND dataset to the `backgrounds/` sub-directory.\n\n`positives/` will then contain utterances of your keyword in various conditions using multiple different voices and dialects. Additionally, you can add custom negative examples to the `negatives/` sub-directory. We recommend that a majority of these utterances use a microphone similar to the one you will be using in the final product. This is because data gathered from different types of microphones can look completely different, e.g. a model trained on utterances recorded using headset microphone will probably not work well with a far field microphone array.\n\nIf your model is intended to be used with many different types of microphones then we recommend gathering positive and negative recordings using as many different microphones as you can.\n\n## Contributing\nPull requests are welcome. For major changes, please open an issue first to discuss what you would like to change.\n\n## License\n[MIT](https://choosealicense.com/licenses/mit/)\n\n\n" }, { "alpha_fraction": 0.6638625860214233, "alphanum_fraction": 0.6708727478981018, "avg_line_length": 36.05194854736328, "blob_id": "31946e5a4a00b62dffad79c36a07b5f9eeb4eb8d", "content_id": "5bcc77606c3321a3f9052fe8e2fc9b130faf5389", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2853, "license_type": "permissive", "max_line_length": 142, "num_lines": 77, "path": "/somnus/preprocess_audio.py", "repo_name": "Skuldur/somnus", "src_encoding": "UTF-8", "text": "import glob\nimport os\n\nimport librosa\nimport librosa.display\nimport numpy as np\nfrom tensorflow.keras import utils\nfrom tqdm import tqdm\n\n\ndef create_dataset(base_dir, n_filters, show_progress, win_length, win_hop):\n \"\"\"\n Create a dataset using the normalized representations of the audio files in base_dir\n\n Args:\n base_dir (string): The directory containing all augmented audio files\n n_filters (int): The number of filters in each frame\n show_progress (boolean): Boolean option to decide whether to show a progress bar (NOTE: showing progress bar may slow down processing)\n win_length (int): The length of each window in frames\n win_hop (int): the number of frames between the starting frame of each consecutive window.\n\n Returns:\n data (array): A 4D array of normalized audio files stored in arrays in the shape of (X, n_filters, 1), where X is the\n number of windows.\n labels (array): A 1D array of labels corresponding to an array at the same index in the data array. 
\n \"\"\"\n data = []\n labels = []\n\n # read total number of files for the progress bar\n _, _, files = next(os.walk(base_dir))\n total_files = len(files)\n\n if show_progress:\n pbar = tqdm(total=total_files)\n\n def preprocess_data(path, label):\n for filename in glob.iglob(path):\n y, sr = librosa.load(filename, sr = 16000)\n x = melnormalize(y, n_filters, win_length, win_hop)\n data.append(x)\n labels.append(label)\n\n if show_progress:\n pbar.update(1)\n\n preprocess_data(os.path.join(base_dir, 'positive*'), 0)\n preprocess_data(os.path.join(base_dir, 'negative*'), 1)\n preprocess_data(os.path.join(base_dir, 'background*'), 2)\n\n labels = utils.to_categorical(labels)\n return np.array(data), np.array(labels)\n \n\ndef melnormalize(audio_time_series, n_filters, win_length, win_hop):\n \"\"\"\n Normalize and expand a audio time series.\n\n Args:\n audio_time_series (array): An audio time series\n n_filters (int): The number of filters in each frame\n win_length (int): The length of each window in frames\n win_hop (int): the number of frames between the starting frame of each consecutive window.\n\n Returns:\n melnormalized (array): A melnormalized representation of the data stored in audio_time_series\n \"\"\"\n normalizedy = librosa.util.normalize(audio_time_series)\n\n stft = librosa.core.stft(normalizedy, n_fft = 512, hop_length=win_hop, win_length=win_length)\n mel = librosa.feature.melspectrogram(S=stft, n_mels=n_filters)\n mellog = np.log(mel + 1e-9)\n melnormalized = librosa.util.normalize(mellog)\n melnormalized = np.expand_dims(melnormalized, axis=-1)\n melnormalized = melnormalized.swapaxes(0,1)\n\n return melnormalized\n" }, { "alpha_fraction": 0.5835438370704651, "alphanum_fraction": 0.5911223292350769, "avg_line_length": 33.209877014160156, "blob_id": "853d577b8842145303853c73de78b466d3c1ed67", "content_id": "6e2c02098f11bbb9a0a6e1a433ac2f070dd27c81", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5542, "license_type": "permissive", "max_line_length": 118, "num_lines": 162, "path": "/somnus/somnus.py", "repo_name": "Skuldur/somnus", "src_encoding": "UTF-8", "text": "from queue import Queue\nfrom threading import Thread\nimport sys\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \n\nimport numpy as np\nimport pyaudio\n\nfrom somnus.models import BaseModel\nfrom somnus.preprocess_audio import melnormalize\n\n\nclass Somnus():\n \"\"\"\n Args:\n model (string): The file containing the trained model\n device_index (int): The device index of the microphone that Somnus should listen to.\n threshold (float): A threshold for how confident Somnus has to be for it to detect the keyword (between [0,1])\n data_shape (tuple): The input shape for the keyword model\n sample_duration (float): How long the input of the keyword model should be in seconds\n n_filters (int): The number of filters in each frame\n win_length (int): The length of each window in frames\n win_hop (int): the number of frames between the starting frame of each consecutive window.\n \"\"\"\n def __init__(\n self, \n model='',\n device_index=0, \n threshold=0.8, \n audio_config=None\n ):\n\n if not audio_config:\n audio_config = self._get_default_config()\n\n self.model = BaseModel()\n self.model.load(model)\n\n self.chunk_duration = 0.1 # Each read length in seconds from mic.\n self.fs = 16000 # sampling rate for mic\n self.chunk_samples = int(self.fs * self.chunk_duration) # Each read length in number of samples.\n\n # Each model input data 
duration in seconds, need to be an integer numbers of chunk_duration\n self.feed_samples = int(self.fs * audio_config['sample_duration'])\n \n self.threshold = threshold\n\n # Data buffer for the input wavform\n self.data = np.zeros(self.feed_samples, dtype='int16')\n self.device_index = device_index\n\n # variables for preprocessing the audio stream\n self.n_filters = audio_config['n_filters']\n self.win_length = audio_config['win_length']\n self.win_hop = audio_config['win_hop']\n\n # Optional variables for continuous listening mode\n # Queue to communiate between the audio callback and main thread\n self.q = None\n self.stream = None\n self.listening = False\n\n def listen(self):\n \"\"\"\n Fetches data from the audio buffer until it detects a trigger word\n\n Returns:\n True if the key word is detected, otherwise False\n \"\"\"\n self._setup_stream()\n try:\n self.stream.start_stream()\n while True:\n audio_stream = self.q.get().astype('float')\n result, confidence = self._get_prediction(audio_stream)\n\n if result == 0 and confidence > self.threshold:\n self.listening = False\n return True \n except (KeyboardInterrupt, SystemExit):\n self.stream.stop_stream()\n self.stream.close()\n sys.exit()\n except:\n # if something fails then we return False\n return False\n\n def detect_keyword(self, audio_stream):\n \"\"\"\n Normalizes the audio_stream argument and detects whether or not it contains the key word\n\n Args:\n audio_stream (array): An audio time series\n\n Returns:\n True if the key word is detected, otherwise False\n \"\"\"\n result, confidence = self._get_prediction(audio_stream)\n\n if result == 0 and confidence > self.threshold:\n return True\n return False\n\n def _get_audio_input_stream(self):\n stream = pyaudio.PyAudio().open(\n format=pyaudio.paInt16,\n channels=1,\n rate=self.fs,\n input=True,\n frames_per_buffer=self.chunk_samples,\n input_device_index=self.device_index,\n stream_callback=self._callback)\n return stream\n\n def _get_default_config(self):\n \"\"\"The default config assumes that all the default arguments for the Somnus CLI were used\"\"\"\n return {\n 'data_shape': (101, 40, 1), \n 'sample_duration': 1.,\n 'n_filters': 40,\n 'win_length': 400,\n 'win_hop': 160\n }\n\n def _callback(self, in_data, frame_count, time_info, status): \n data0 = np.frombuffer(in_data, dtype='int16')\n \n self.data = np.append(self.data,data0) \n if len(self.data) > self.feed_samples:\n self.data = self.data[-self.feed_samples:]\n # Process data async by sending a queue.\n if self.listening:\n self.q.put(self.data)\n return (in_data, pyaudio.paContinue)\n\n def _setup_stream(self):\n \"\"\" \n Initialize the audio stream for continuous listening\n \"\"\"\n self.stream = self._get_audio_input_stream()\n self.listening = True\n self.q = Queue()\n self.data = np.zeros(self.feed_samples, dtype='int16')\n\n def _get_prediction(self, audio_stream):\n \"\"\"\n Predicts the class of the audio time series\n\n Args:\n audio_stream (array): An audio time series\n\n Returns:\n Returns the predicted class and the confidence the model has in its prediction\n \"\"\"\n data = melnormalize(audio_stream, self.n_filters, self.win_length, self.win_hop)\n data = np.expand_dims(data, axis=0)\n\n preds = self.model.predict(data)\n res = np.argmax(preds)\n\n return res, max(preds)\n" }, { "alpha_fraction": 0.5801281929016113, "alphanum_fraction": 0.6023504137992859, "avg_line_length": 33.16058349609375, "blob_id": "4e4c3ee7d65368b9c2fd8e666d1d62da713d0bc2", "content_id": 
"691c708eb6bad06b27db5ac4e99409b82574e84a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4680, "license_type": "permissive", "max_line_length": 125, "num_lines": 137, "path": "/somnus/models.py", "repo_name": "Skuldur/somnus", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom tensorflow.keras.layers import Dense, GRU, Bidirectional, Input, Dropout, MaxPooling2D, Conv2D, Flatten, TimeDistributed\nfrom tensorflow.keras.models import Model, load_model\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint\nfrom tensorflow.keras import losses\n\nclass BaseModel():\n def __init__(self):\n self.model = None\n self.filepath = ''\n\n def compile(self, learning_rate):\n opt = Adam(lr=learning_rate)\n self.model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=[\"accuracy\"])\n\n def train(self, data, labels, val_data, val_labels, epochs, save_best, batch_size):\n callbacks = []\n reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,\n patience=5, min_lr=0.00001)\n callbacks.append(reduce_lr)\n\n if save_best:\n checkpoint = ModelCheckpoint(\n self.filepath,\n monitor='loss',\n verbose=0,\n save_best_only=True,\n mode='min'\n )\n callbacks.append(checkpoint)\n\n self.model.fit(\n x=data,\n y=labels,\n validation_data=(val_data, val_labels),\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n shuffle=True,\n callbacks=callbacks\n )\n\n def predict(self, input):\n p = self.model.predict(input)\n\n return p.reshape(-1)\n\n def save(self, name):\n self.model.save(name)\n\n def load(self, weights_path):\n self.model = load_model(weights_path)\n\n\nclass CnnTradFPool(BaseModel):\n def __init__(self, input_shape):\n \"\"\"\n Function creating the model's graph in Keras.\n \n Argument:\n input_shape: shape of the model's input data (using Keras conventions)\n \"\"\"\n self.filepath = \"cnn-trad-f-pool-{epoch:02d}-{loss:.4f}.hdf5\"\n \n X_input = Input(shape = input_shape)\n\n conv1 = Conv2D(64, kernel_size=(66, 8), strides=1, padding='same', activation='relu')(X_input)\n drop1 = Dropout(0.2)(conv1)\n maxpool = MaxPooling2D(pool_size=[3, 3], strides=[3,3], padding='same')(drop1)\n\n conv2 = Conv2D(64, kernel_size=(32, 4), strides=1, padding='same', activation='relu')(maxpool)\n drop2 = Dropout(0.2)(conv2)\n flattened = Flatten()(drop2)\n\n dense = Dense(3, activation='softmax')(flattened)\n\n self.model = Model(inputs = X_input, outputs = dense)\n\n\nclass CnnOneFStride(BaseModel):\n def __init__(self, input_shape):\n \"\"\"\n Function creating the model's graph in Keras.\n \n Argument:\n input_shape: shape of the model's input data (using Keras conventions)\n \"\"\"\n self.filepath = \"cnn-one-f-stride-{epoch:02d}-{loss:.4f}.hdf5\"\n\n X_input = Input(shape = input_shape)\n\n conv1 = Conv2D(186, kernel_size=(101, 8), strides=(1,4), padding='valid', activation='relu')(X_input)\n drop1 = Dropout(0.2)(conv1)\n flattened = Flatten()(drop1)\n\n dense = Dense(128, activation='relu')(flattened)\n dense = Dense(128, activation='relu')(dense)\n dense = Dense(3, activation='softmax')(dense)\n\n self.model = Model(inputs = X_input, outputs = dense)\n\n\nclass CrnnTimeStride(BaseModel):\n def __init__(self, input_shape):\n \"\"\"\n Function creating the model's graph in Keras.\n \n Argument:\n input_shape: shape of the model's input data (using Keras conventions)\n \"\"\"\n self.filepath = \"crnn-time-stride-{epoch:02d}-{loss:.4f}.hdf5\"\n \n X_input 
= Input(shape = input_shape)\n\n conv1 = Conv2D(32, kernel_size=(20, 5), strides=(8,2), padding='same', activation='relu')(X_input)\n bigru1 = TimeDistributed(Bidirectional(GRU(units=32, return_sequences=True, unroll=True)))(conv1)\n bigru2 = TimeDistributed(Bidirectional(GRU(units=32, unroll=True)))(bigru1)\n flatten = Flatten()(bigru2)\n dense1 = Dense(64, activation='relu')(flatten)\n output = Dense(3, activation='softmax')(dense1)\n\n self.model = Model(inputs = X_input, outputs = output)\n\n\n# Model utils\ndef get_model(model_name, shape):\n if model_name == 'cnn-one-stride':\n model = CnnOneFStride(input_shape=shape)\n elif model_name == 'cnn-trad-pool':\n model = CnnTradFPool(input_shape=shape)\n elif model_name == 'crnn-time-stride':\n model = CrnnTimeStride(input_shape=shape)\n else:\n raise ValueError(\"Model type %s not supported\" % model)\n\n return model\n" }, { "alpha_fraction": 0.6593939661979675, "alphanum_fraction": 0.6840404272079468, "avg_line_length": 32, "blob_id": "c91e1a4dd64527659f0e406f1a2db94de9360937", "content_id": "a852fb9285edf2d5cd23eace813d45954a816652", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2475, "license_type": "permissive", "max_line_length": 102, "num_lines": 75, "path": "/somnus/utils.py", "repo_name": "Skuldur/somnus", "src_encoding": "UTF-8", "text": "import os\nimport glob\nfrom pathlib import Path\n\nimport numpy as np\nfrom pydub import AudioSegment\nfrom tqdm import tqdm\n\nSUPPORTED_AUDIO_EXTENSIONS = ['.wav', '.mp3', '.flac', '.ogg', '.flv', '.wma', '.aac']\n\n# Used to standardize volume of audio clip\ndef match_target_amplitude(sound, target_dBFS):\n change_in_dBFS = target_dBFS - sound.dBFS\n return sound.apply_gain(change_in_dBFS)\n\n# Load raw audio files for speech synthesis\ndef load_raw_audio(base_dir, length=1):\n base = AudioSegment.silent(duration=length * 1000)\n def load_directory(audio_dir, loop=False):\n audio_segments = []\n for path in Path(os.path.join(base_dir, audio_dir)).rglob('*.*'):\n if path.suffix in SUPPORTED_AUDIO_EXTENSIONS:\n audio_format = path.suffix.split('.')[-1]\n segment = AudioSegment.from_file(path.absolute(), format=audio_format).set_channels(1)\n\n if loop:\n segment = base.overlay(segment, loop=True)\n audio_segments.append(segment)\n\n return audio_segments\n\n activates = load_directory('positives')\n backgrounds = load_directory('backgrounds', loop=True)\n negatives = load_directory('negatives')\n\n return activates, negatives, backgrounds\n\ndef create_positive_example(background, activate, time_shift):\n background_var = np.random.randint(-15, 10)\n background = background + background_var\n\n background = background.overlay(activate, position = time_shift)\n\n background = match_target_amplitude(background, -20.0)\n\n background = background.set_frame_rate(16000)\n\n return background\n\ndef create_negative_example(background, dummy, negative, time_shift):\n background_var = np.random.randint(-15, 10)\n background = background + background_var\n background = background.overlay(dummy - 100)\n\n if len(negative) - 1000 <= 0:\n random_start = np.random.randint(0, 300)\n else:\n random_start = np.random.randint(0, len(negative) - 1000)\n\n background = background.overlay(negative[random_start:], position = time_shift)\n\n background = match_target_amplitude(background, -20.0)\n\n background = background.set_frame_rate(16000)\n\n return background\n\ndef create_silent_example(background, dummy):\n background_var = 
np.random.randint(-15, 10)\n background = background + background_var\n background = background.overlay(dummy - 100)\n\n background = background.set_frame_rate(16000)\n\n return background\n" }, { "alpha_fraction": 0.8703703880310059, "alphanum_fraction": 0.8703703880310059, "avg_line_length": 5.875, "blob_id": "7a2ad2305f4a5bc595e5f3d254a05da38bf3dc1c", "content_id": "88f3667279bd708db49897a09b68f95185eb5800", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 54, "license_type": "permissive", "max_line_length": 10, "num_lines": 8, "path": "/requirements.txt", "repo_name": "Skuldur/somnus", "src_encoding": "UTF-8", "text": "numpy\npydub\npyaudio\nscipy\nlibrosa\ntensorflow\nfire\ntqdm" }, { "alpha_fraction": 0.6249088644981384, "alphanum_fraction": 0.6331486105918884, "avg_line_length": 41.3271598815918, "blob_id": "64272666ba29a9e37456ef993177a69a4153525a", "content_id": "cf927d4a633b6f831d7a1f8569ac5e97bc7ef9a7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13714, "license_type": "permissive", "max_line_length": 160, "num_lines": 324, "path": "/cli/cli.py", "repo_name": "Skuldur/somnus", "src_encoding": "UTF-8", "text": "import json\nfrom functools import wraps\nfrom pathlib import Path\nfrom types import FunctionType\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \n\nimport fire\nfrom pydub import AudioSegment\nimport numpy as np\nfrom tqdm import tqdm\nimport pyaudio\n\nfrom somnus.utils import load_raw_audio, create_positive_example, create_negative_example, create_silent_example\n\n\nCONFIG_ERROR_MSGS = {\n 'raw_audio': 'Raw Audio Directory is not set. ',\n 'augmented_audio': 'Augmented Audio Directory is not set. ',\n 'preprocessed_data': 'Preprocessed Data Directory is not set. 
'\n}\n\ndef config_wrapper(method, config):\n @wraps(method)\n def wrapped(*args, **kwargs):\n for key in CONFIG_ERROR_MSGS.keys():\n if not config.get(key):\n raise ValueError(CONFIG_ERROR_MSGS[key] + 'Please run \\'somnus configure\\' to set value!')\n \n method(*args, **kwargs)\n return wrapped\n\nclass ConfigWrapper(type):\n def __new__(meta, classname, bases, classDict):\n newClassDict = {}\n config = classDict['config']\n\n # Wraps every public method except configure so that the user needs to run 'somnus configure'\n # before using the other methods\n for attributeName, attribute in classDict.items():\n if isinstance(attribute, FunctionType) and attributeName != 'configure' and attributeName[0] != '_': \n # replace it with a wrapped version\n attribute = config_wrapper(attribute, config)\n newClassDict[attributeName] = attribute\n return type.__new__(meta, classname, bases, newClassDict)\n\n\nclass SomnusCLI(metaclass=ConfigWrapper):\n config = {}\n\n def __init__(self, numpy_seed=1, base_dir='.'):\n np.random.seed(numpy_seed)\n\n self._load_config()\n \n def _load_config(self):\n config = {}\n config_file = os.path.join(Path.home(), '.somnus', 'config.json')\n if os.path.isfile(config_file):\n with open(config_file, 'r') as file:\n config = json.load(file)\n\n # we copy the values over so that the config wrapper has a reference to the correct config\n for key, val in config.items():\n self.config[key] = val\n\n def configure(self):\n config = {}\n config['raw_audio'] = input(\"Raw Audio Directory [%s]: \" % self.config.get('raw_audio')) or self.config.get('raw_audio')\n config['augmented_audio'] = input(\"Augmented Audio Directory [%s]: \" % self.config.get('augmented_audio')) or self.config.get('augmented_audio')\n config['preprocessed_data'] = input(\"Preprocessed Data Directory [%s]: \" % self.config.get('preprocessed_data')) or self.config.get('preprocessed_data')\n\n config_file = os.path.join(Path.home(), '.somnus', 'config.json')\n os.makedirs(os.path.dirname(config_file), exist_ok=True)\n with open(config_file, 'w') as file:\n file.write(json.dumps(config))\n\n def augment_audio(self, duration=1, positive=90000, negative=120000, background=90000):\n \"\"\"\n Method to create the audio dataset for keyword recognition.\n\n For each audio clip that is generated the following algorithm is executed:\n\n 1. Select an audio clip of background noise\n 2. Randomly increase of decrease the DB of the background noise\n 3. Overlay a voice clip where applicable with a random shift to the right\n 4. Standardize the volume of the audio clip\n 5. Set the frame rate of the resulting audio clip to 16k\n 6. 
Write the audio to a file in processed_audio/\n\n :param duration: The duration of the audio clips in seconds\n :param positive: The number of positive examples\n :param negative: The number of negative examples\n :param background: The number of examples containing only background noise\n \"\"\"\n\n print('Please wait while we load the raw audio files...')\n positives, negatives, backgrounds = load_raw_audio(self.config['raw_audio'], duration)\n\n if len(positives) == 0:\n raise IndexError('There are no supported audio files in %s/%s' % (self.config['raw_audio'], 'positives'))\n if len(negatives) == 0:\n raise IndexError('There are no supported audio files in %s/%s' % (self.config['raw_audio'], 'negatives'))\n if len(backgrounds) == 0:\n # Add a single silent background audio segment\n backgrounds.append(AudioSegment.silent(duration=duration * 1000))\n\n aug_path = self.config['augmented_audio']\n print('Augmenting positive audio samples:')\n for i in tqdm(range(positive)):\n time_shift = np.random.randint(200)\n segment = create_positive_example(backgrounds[i % len(backgrounds)], positives[i % len(positives)], time_shift)\n\n segment.export(os.path.join(aug_path, 'positive_%s.wav' % i), format='wav')\n\n print('Augmenting negative audio samples:')\n for i in tqdm(range(negative)):\n time_shift = np.random.randint(600)\n segment = create_negative_example(backgrounds[i % len(backgrounds)], positives[0], negatives[i % len(negatives)], time_shift)\n\n segment.export(os.path.join(aug_path, 'negative_%d.wav' % i), format='wav')\n\n print('Augmenting background audio audio samples:')\n for i in tqdm(range(background)):\n segment = create_silent_example(backgrounds[i % len(backgrounds)], positives[0])\n\n segment.export(os.path.join(aug_path, 'background_%s.wav' % i), format='wav')\n\n def preprocess(self, filters=40, show_progress=True, split=(0.9, 0.05, 0.05), win_length=0.025, win_hop=0.01):\n \"\"\"\n Preprocess the augmented audio and create a dataset of numpy arrays ready for use with the keyword detection models\n\n :param filters: The number of filters in each frame\n :param show_progress: Boolean option to decide whether to show a progress bar (NOTE: showing progress bar may slow down processing)\n :param split: How much data should be in the training, validation, and test datasets. 
Values must add up to 1.\n :param win_length: The length of each window in seconds\n :param win_hop: the time between the start of each consecutive window.\n \"\"\"\n from somnus.preprocess_audio import create_dataset\n assert sum(split) == 1\n\n augmented_path = self.config['augmented_audio']\n preprocessed_path = self.config['preprocessed_data']\n frame_rate = 16000\n\n win_length = int(win_length * frame_rate)\n win_hop = int(win_hop * frame_rate)\n\n data, labels = create_dataset(augmented_path, filters, show_progress, win_length, win_hop)\n\n # randomly shuffle the data\n p = np.random.permutation(len(data))\n data = data[p]\n labels = labels[p]\n\n train_data, val_data, test_data = np.split(data, [int(len(data)*split[0]), int(len(data)*(split[0] + split[1]))])\n train_labels, val_labels, test_labels = np.split(labels, [int(len(labels)*split[0]), int(len(labels)*(split[0] + split[1]))])\n\n np.save(os.path.join(preprocessed_path, 'train_data.npy'), train_data)\n np.save(os.path.join(preprocessed_path, 'train_labels.npy'), train_labels)\n np.save(os.path.join(preprocessed_path, 'validation_data.npy'), val_data)\n np.save(os.path.join(preprocessed_path, 'validation_labels.npy'), val_labels)\n np.save(os.path.join(preprocessed_path, 'test_data.npy'), test_data)\n np.save(os.path.join(preprocessed_path, 'test_labels.npy'), test_labels)\n\n audio_config = {\n 'data_shape': train_data[0].shape, \n 'sample_duration': (train_data[0].shape[0]-1) / (frame_rate // win_hop),\n 'n_filters': filters,\n 'win_length': win_length,\n 'win_hop': win_hop\n }\n\n print(\"\\nUse the following config as the audio_config parameter in Somnus when using models trained with this dataset: \\n\\n%s\" \n % json.dumps(audio_config, indent=2, sort_keys=True))\n\n def train(self, model_name='cnn-one-stride', epochs=200, output='saved_model.h5',\n save_best=False, batch_size=64, lr=0.0001):\n \"\"\"\n Trains a small-footprint keyword detection model using augmented WAV files\n\n :param model_name: The name of the model we want to train\n :param epochs: The number of epochs\n :param output: The name of the file the final model should be saved to\n :param save_best: Whether or not the model should save the best model throughout the training process\n :param batch_size: The size of each mini batch\n :param lr: The initial learning rate\n \"\"\"\n from somnus.models import get_model\n\n preprocessed_path = self.config['preprocessed_data']\n\n train_data = np.load(os.path.join(preprocessed_path, 'train_data.npy'))\n train_labels = np.load(os.path.join(preprocessed_path, 'train_labels.npy'))\n val_data = np.load(os.path.join(preprocessed_path, 'validation_data.npy'))\n val_labels = np.load(os.path.join(preprocessed_path, 'validation_labels.npy'))\n\n shape = train_data[0].shape\n model = get_model(model_name, shape)\n model.compile(lr)\n model.train(train_data, train_labels, val_data, val_labels, epochs, save_best, batch_size)\n model.save(output)\n\n def test(self, model='saved_model.h5'):\n \"\"\"\n Tests a trained model against a test dataset\n\n :param model: The file containing the model we want to test\n \"\"\"\n from somnus.models import BaseModel\n\n model_path = model\n\n preprocessed_path = self.config['preprocessed_data']\n\n data = np.load(os.path.join(preprocessed_path, 'test_data.npy'))\n labels = np.load(os.path.join(preprocessed_path, 'test_labels.npy'))\n\n model = BaseModel()\n\n model.load(model_path)\n\n wrong = 0\n for idx in tqdm(range(len(data))):\n audio = data[idx]\n label = labels[idx]\n p = 
model.predict(np.expand_dims(audio, axis=0))\n\n if np.argmax(p) != np.argmax(label):\n wrong += 1\n\n percentage = 100*((len(data)-wrong) / len(data))\n print(\"\\n Test dataset accuracy is %s percent\" % percentage)\n\n def list_microphones(self):\n \"\"\"\n List all microphones connected to the device\n \"\"\"\n p = pyaudio.PyAudio()\n for i in range(p.get_device_count()):\n print(\"Input Device id \", i, \" - \", p.get_device_info_by_host_api_device_index(0, i).get('name'))\n\n def cleanup(self):\n \"\"\"\n Removes all the data files that have been generated by the CLI in the Augmented Audio Directory \n and the Preprocessed Data Directory\n \"\"\"\n paths = [self.config['augmented_audio'], self.config['preprocessed_data']]\n\n for path in paths:\n filelist = [ f for f in os.listdir(path)]\n print(\"Deleting files from %s:\" % path)\n for f in tqdm(filelist):\n os.remove(os.path.join(path, f))\n\n def quantize_model(self, model='saved_model.h5', output='saved_model.tflite', quantization='cpu'):\n \"\"\"\n Converts the model to TFLite and quantizes it. This reduces the size of the model by a factor of 2-3, \n while barely affecting the accuracy of the model\n\n :param model: The file containing the model we want to convert\n :param output: The file that the TFLite model will be written to\n :param quantization: The hardware that the model should be optimized for (CPU, GPU)\n \"\"\"\n import tensorflow as tf\n from somnus.models import BaseModel\n\n model_path = model\n model = BaseModel()\n model.load(model_path)\n\n converter = tf.lite.TFLiteConverter.from_keras_model(model.model)\n \n # quantize the model. This reduces the size of the model significantly while reducing accuracy\n # only slightly\n quantization = quantization.lower()\n if quantization == 'cpu':\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n elif quantization == 'gpu':\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.target_spec.supported_types = [tf.float16]\n \n tflite_model = converter.convert()\n\n open(output, \"wb\").write(tflite_model)\n\n def test_quantized_model(self, quantized_model='saved_model.tflite'):\n \"\"\"\n Tests a trained model against a test dataset\n\n :param quantized_model: The file containing the quantized model we want to test\n \"\"\"\n import tensorflow as tf\n\n\n interpreter = tf.lite.Interpreter(model_path=quantized_model)\n interpreter.allocate_tensors()\n\n preprocessed_path = self.config['preprocessed_data']\n\n data = np.load(os.path.join(preprocessed_path, 'test_data.npy'))\n labels = np.load(os.path.join(preprocessed_path, 'test_labels.npy'))\n\n wrong = 0\n for idx in tqdm(range(len(data))):\n audio = data[idx]\n label = labels[idx]\n\n input_index = interpreter.get_input_details()[0][\"index\"]\n output_index = interpreter.get_output_details()[0][\"index\"]\n\n interpreter.set_tensor(input_index, np.expand_dims(audio, axis=0).astype(np.float32))\n interpreter.invoke()\n p = interpreter.get_tensor(output_index)\n\n if np.argmax(p) != np.argmax(label):\n wrong += 1\n\n percentage = 100*((len(data)-wrong) / len(data))\n print(\"\\n Test dataset accuracy is %s percent\" % percentage)\n\n\ndef main():\n fire.Fire(SomnusCLI)\n" }, { "alpha_fraction": 0.5371312499046326, "alphanum_fraction": 0.5707019567489624, "avg_line_length": 27.08571434020996, "blob_id": "a34ca8c6e619c134b9a1ca8443a86686dfc8b0bb", "content_id": "f7f8e864972c983bff3a89442b814c36fc2b717a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 983, "license_type": "permissive", "max_line_length": 58, "num_lines": 35, "path": "/setup.py", "repo_name": "Skuldur/somnus", "src_encoding": "UTF-8", "text": "import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"somnus\",\n version=\"0.2.2\",\n author=\"Sigurður Skúli Sigurgeirsson\",\n author_email=\"[email protected]\",\n description=\"Somnus is keyword detection made easy.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/Skuldur/somnus\",\n packages=setuptools.find_packages(exclude=(\"tests\",)),\n install_requires=[\n \"numpy>=1.16.2\",\n \"pydub>=0.23.1\",\n \"pyaudio>=0.2.11\",\n \"librosa>=0.8.0\",\n \"tensorflow>=2.2.0\",\n \"fire>=0.3.1\",\n \"tqdm>=4.47.0\"\n ],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n entry_points = {\n 'console_scripts': ['somnus=cli.cli:main'],\n },\n python_requires='>=3.6',\n)\n" }, { "alpha_fraction": 0.7243683338165283, "alphanum_fraction": 0.7285445928573608, "avg_line_length": 36.13178253173828, "blob_id": "18318da76f90dd4c2e293347022758f6a2923f02", "content_id": "956e84068931f1c1f359adb186c3aa25bf4af023", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4789, "license_type": "permissive", "max_line_length": 292, "num_lines": 129, "path": "/cli/README.md", "repo_name": "Skuldur/somnus", "src_encoding": "UTF-8", "text": "# Somnus CLI\n\nSomnus comes with a CLI that allows you to generate audio data and train your own keyword detection model. The CLI is implemented using Python-Fire. For each command you can use `-h` or `--help` to get a description of the command and a list of the possible arguments/options for the command.\n\nTo start using the CLI run `somnus configure` to create the configuration for the Somnus CLI. Then the raw data directory must contain three sub-directories:\n\n* `positives/` for audio files containing utterances of the keyword. Must contain at least 1 audio file.\n* `negatives/` for audio files containing speech that does not contain utterances of the keyword. Must contain at least 1 audio file.\n* `backgrounds/` for audio files that contain background noise. This directory is optional but we recommend adding noise to the training data so that the keyword detector also works in noisy conditions.\n\nThe CLI currently supports the following audio types: **wav, mp3, flac, ogg, flv, wma, aac**\n\n## Configure\n\n```bash\nsomnus configure\n```\n\nCreate a configuration file with the absolute paths to the:\n\n* Raw audio data directory\n* Directory that should contain the augmented audio files\n* Directory that should contain the preprocessed data files\n\n**Note** that the augmented audio files and preprocessed data files can use a lot of space so make sure to put them on a drive with a lot of available space.\n\n## Augmenting audio\n\n```bash\nsomnus augment_audio\n```\n\nThe command to generate an audio dataset takes the raw audio in your raw audio directory as input and generates positive, negative, and silent audio files with varying amounts of background noise. 
These audio files are written to the augmented audio directory.\n\n**Arguments**\n* **duration**: The duration of the audio clips in seconds \n* **positive**: The number of positive examples \n* **negative**: The number of negative examples \n* **background**: The number of examples containing only background noise \n\n## Preprocessing and creating the dataset\n```bash\nsomnus preprocess\n```\n\nThe command to preprocess the augmented audio files takes the files stored in the augmented audio directory, normalizes them, and stores the output arrays in the preprocessed data directory.\n\n**Arguments**\n* **filters**: The number of filters in each frame \n* **show_progress**: Boolean option to decide whether to show a progress bar \n* **split**: The split between train, validation, and test data. The total should add up to 1. E.g. `(0.9, 0.05, 0.05)` \n* **win_length**: The length of each window in seconds \n* **win_hop**: The time between the start of each consecutive window. \n\n## Training\n\n```bash\nsomnus train\n```\n\nThe command to train a small-footprint keyword model loads the data from the preprocessed data directory and uses it to train the keyword model.\n\n**Arguments**\n* **model_name**: The name of the model we want to train \n* **epochs**: The number of epochs \n* **output**: The name of the file the final model should be saved to \n* **save_best**: Whether or not the model should save the best model throughout the training process \n* **batch_size**: The size of each mini batch \n* **lr**: The initial learning rate \n\n## Testing\n\n```bash\nsomnus test\n```\n\nThe command to test a trained model on a withheld test dataset.\n\n**Arguments**\n* **model**: The file containing the model we want to test\n\n## List microphones\n\n```bash\nsomnus list_microphones\n```\n\nPrints out a list of microphones connected to your device along with their device IDs.\n\n## Cleanup files\n\n```bash\nsomnus cleanup\n```\n\nRemoves all the data files that have been generated by the CLI in the Augmented Audio Directory and the Preprocessed Data Directory.\n\n\n## Quantize model\n\n```bash\nsomnus quantize_model\n```\n\nConverts the model to a format compatible with TensorFlow Lite (tflite) and quantizes it. 
Quantizing the model reduces the size of the model by a factor of 2-4 while barely reducing the total accuracy of the model.\n\nThe CLI offers two different methods for quantization:\n\n| Option | Quantization Method | Benefits | Hardware |\n|--------|----------------------------|------------------------------|----------|\n| CPU | Dynamic range quantization | 4x smaller, 2x-3x speedup | CPU |\n| GPU | Float16 quantization | 2x smaller, GPU acceleration | CPU, GPU |\n\n**Arguments**\n\n* **model**: The file containing the model we want to convert\n* **output**: The file that the TFLite model will be written to\n* **quantization**: The hardware that the model should be optimized for (CPU, GPU)\n\n## Testing Quantized model\n\n```bash\nsomnus test_quantized_model\n```\n\nThe command to test a quantized model on a witheld test dataset.\n\n**Arguments**\n* **model**: The file containing the model we want to test" }, { "alpha_fraction": 0.6381766200065613, "alphanum_fraction": 0.6509971618652344, "avg_line_length": 31.8125, "blob_id": "9af0bf182018d8ac68ac67904a14f36ba805b911", "content_id": "d2eb6a5db8401925cbe97f118a7113dda5f4f7eb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2106, "license_type": "permissive", "max_line_length": 112, "num_lines": 64, "path": "/tests/test_utils.py", "repo_name": "Skuldur/somnus", "src_encoding": "UTF-8", "text": "import os\nimport unittest\n\nfrom pydub import AudioSegment\nimport numpy as np\n\nfrom somnus.utils import load_raw_audio, create_positive_example, create_negative_example, create_silent_example\n\nTEST_DIR = os.path.dirname(os.path.realpath(__file__))\n\n\nclass TestUtils(unittest.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n\n def test_load_raw_audio(self):\n base_dir = os.path.join(TEST_DIR, 'fixtures')\n duration = 1\n\n pos, neg, back = load_raw_audio(base_dir, duration)\n\n self.assertEqual(len(pos), 1)\n self.assertEqual(len(neg), 1)\n self.assertEqual(len(back), 1)\n\n def test_create_positive_example(self):\n base_dir = os.path.join(TEST_DIR, 'fixtures')\n duration = 1\n\n pos, neg, back = load_raw_audio(base_dir, duration)\n\n time_shift = np.random.randint(200)\n pos_seg = create_positive_example(back[0], pos[0], time_shift)\n\n true_seg = AudioSegment.from_wav(os.path.join(base_dir, 'augmented/positive_0.wav')).set_channels(1)\n\n self.assertSequenceEqual(pos_seg.get_array_of_samples(), true_seg.get_array_of_samples())\n\n def test_create_negative_example(self):\n base_dir = os.path.join(TEST_DIR, 'fixtures')\n duration = 1\n\n pos, neg, back = load_raw_audio(base_dir, duration)\n\n time_shift = np.random.randint(600)\n neg_seg = create_negative_example(back[0], pos[0], neg[0], time_shift)\n\n true_seg = AudioSegment.from_wav(os.path.join(base_dir, 'augmented/negative_0.wav')).set_channels(1)\n \n\n self.assertSequenceEqual(neg_seg.get_array_of_samples(), true_seg.get_array_of_samples())\n\n def test_create_silent_example(self):\n base_dir = os.path.join(TEST_DIR, 'fixtures')\n duration = 1\n\n pos, neg, back = load_raw_audio(base_dir, duration)\n\n back_seg = create_silent_example(back[0], pos[0])\n\n true_seg = AudioSegment.from_wav(os.path.join(base_dir, 'augmented/background_0.wav')).set_channels(1)\n\n self.assertSequenceEqual(back_seg.get_array_of_samples(), true_seg.get_array_of_samples())\n\n\n\n\n\n\n" } ]
10
inaccare/Beer-Type-Prediction
https://github.com/inaccare/Beer-Type-Prediction
ffeaba53d6b3fee223c747183d60447feb45385d
89327f2d721eaee74d144f01e40d53ee3d0c0eb3
1c7035b2099aa542887eaefc67e41ba42f951fb0
refs/heads/master
2020-03-15T03:06:18.203463
2018-06-14T07:25:23
2018-06-14T07:25:23
131,934,457
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.603411078453064, "alphanum_fraction": 0.6408547163009644, "avg_line_length": 34.1793098449707, "blob_id": "7cf4e9dba1f5d933034d12c4accc3a9d0002e5c6", "content_id": "9be6219a2accfb1ec6f78caafaa50f845603dc36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5101, "license_type": "no_license", "max_line_length": 251, "num_lines": 145, "path": "/cluster.py", "repo_name": "inaccare/Beer-Type-Prediction", "src_encoding": "UTF-8", "text": "\nimport nltk\nimport gensim\nimport pandas as pd\nimport numpy as np\nimport sys\nimport pylab as pl\nimport codecs\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition import PCA\n\n\n\ndef average_vecs(w2v):\n\ttokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\n\twith open('beers.csv') as f:\n\t\tdf = pd.read_csv(f)\n\t\tstyles = df['style'].unique()[:-1]\n\t\tall_styles = np.zeros((89, 100))\n\t\tfor i, s in enumerate(styles):\n\t\t\tall_beers = df.loc[df['style'] == s]\n\t\t\tsummed = np.zeros((100))\n\t\t\ttop_thousand = all_beers.head(1000)\n\t\t\tnum_rows = len(top_thousand)\n\t\t\tfor index, row in top_thousand.iterrows():\n\t\t\t\treview = row['text']\n\t\t\t\treview_tokenized = nltk.word_tokenize(review.lower())\n\t\t\t\tfor token in review_tokenized:\n\t\t\t\t\tif token in w2v.wv.vocab:\n\t\t\t\t\t\tsummed += w2v[token]\n\t\t\tsummed /= num_rows\n\t\t\tall_styles[i] = summed\n\t\treturn all_styles, styles\n\ndef cluster(all_styles):\n\tkmeans = KMeans()\n\tkmeans.fit(all_styles)\n\treturn kmeans.labels_\n\ndef add_cluster_csv(cluster):\n\ttrain = pd.read_csv('beers_train.csv')\n\tdev = pd.read_csv('beers_dev.csv')\n\ttest = pd.read_csv('beers_test.csv')\n\n\ttrain_clusters = []\n\tdev_clusters = []\n\ttest_clusters = []\n\n\tfor index, row in train.iterrows():\n\t\ts = row['style']\n\t\tif type(s) is not str:\n\t\t\ttrain_clusters.append(float('nan'))\n\t\t\tcontinue\n\t\tstring_nobackslashes = codecs.decode(s, 'unicode_escape')\n\t\tstring_nobackslashes = string_nobackslashes.encode('ISO-8859-1')\n\t\tcorrect_string = string_nobackslashes.replace(b'\\xc3\\xa9', 'e').replace(b'\\xc3\\xb6', 'o').replace(b'&#40;IPA&#41;', '').replace(b'&#40;Witbier&#41;', '').replace('\\xc3\\xa4', 'a').replace(b'\\xc3\\xa8', 'e').replace('b\\'', '').replace('\\'', '').strip()\n\t\tcorrect_string = correct_string.decode('ascii')\n\t\ttrain_clusters.append(cluster[correct_string] + 1)\n\n\ttrain['cluster'] = train_clusters\n\n\tfor index, row in dev.iterrows():\n\t\ts = row['style']\n\t\tif type(s) is not str:\n\t\t\tdev_clusters.append(float('nan'))\n\t\t\tcontinue\n\t\tstring_nobackslashes = codecs.decode(s, 'unicode_escape')\n\t\tstring_nobackslashes = string_nobackslashes.encode('ISO-8859-1')\n\t\tcorrect_string = string_nobackslashes.replace(b'\\xc3\\xa9', 'e').replace(b'\\xc3\\xb6', 'o').replace(b'&#40;IPA&#41;', '').replace(b'&#40;Witbier&#41;', '').replace('\\xc3\\xa4', 'a').replace(b'\\xc3\\xa8', 'e').replace('b\\'', '').replace('\\'', '').strip()\n\t\tcorrect_string = correct_string.decode('ascii')\n\t\tdev_clusters.append(cluster[correct_string] + 1)\n\n\tdev['cluster'] = dev_clusters\n\n\tfor index, row in test.iterrows():\n\t\ts = row['style']\n\t\tif type(s) is not str:\n\t\t\ttest_clusters.append(float('nan'))\n\t\t\tcontinue\n\t\tstring_nobackslashes = codecs.decode(s, 'unicode_escape')\n\t\tstring_nobackslashes = string_nobackslashes.encode('ISO-8859-1')\n\t\tcorrect_string = string_nobackslashes.replace(b'\\xc3\\xa9', 'e').replace(b'\\xc3\\xb6', 
'o').replace(b'&#40;IPA&#41;', '').replace(b'&#40;Witbier&#41;', '').replace('\\xc3\\xa4', 'a').replace(b'\\xc3\\xa8', 'e').replace('b\\'', '').replace('\\'', '').strip()\n\t\tcorrect_string = correct_string.decode('ascii')\n\t\ttest_clusters.append(cluster[correct_string] + 1)\n\n\ttest['cluster'] = test_clusters\n\n\ttrain.to_csv('beers_train_cluster.csv')\n\tdev.to_csv('beers_dev_cluster.csv')\n\ttest.to_csv('beers_test_cluster.csv')\n\n\ndef main():\n\treplacements = {'\\xc3\\xa9': 'e', '\\xc3\\xb6': 'o', '&#40;IPA&#41;': ''}\n\tw2vFile = sys.argv[1]\n\tw2v = gensim.models.word2vec.Word2Vec.load(w2vFile)\n\tall_styles, styles = average_vecs(w2v)\n\tnew_styles = []\n\tfor s in styles:\n\t\tstring_nobackslashes = codecs.decode(s, 'unicode_escape')\n\t\tstring_nobackslashes = string_nobackslashes.encode('ISO-8859-1')\n\t\tcorrect_string = string_nobackslashes.replace(b'\\xc3\\xa9', 'e').replace(b'\\xc3\\xb6', 'o').replace(b'&#40;IPA&#41;', '').replace(b'&#40;Witbier&#41;', '').replace('\\xc3\\xa4', 'a').replace(b'\\xc3\\xa8', 'e').replace('b\\'', '').replace('\\'', '').strip()\n\t\tcorrect_string = correct_string.decode('ascii')\n\t\tnew_styles.append(correct_string)\n\tprint(new_styles)\n\tstyles = new_styles\n\tlabels = cluster(all_styles)\n\tcluster_key = {}\n\tclusters = {}\n\tfor i in range(8):\n\t\tclusters[i] = []\n\tfor j in range(len(labels)):\n\t\tcluster_key[styles[j]] = labels[j]\n\t\tclusters[labels[j]].append(styles[j])\n\tprint(clusters)\n\n\tadd_cluster_csv(cluster_key)\n\n\tpca = PCA(n_components=2).fit(all_styles)\n\tpca_2d = pca.transform(all_styles)\n\n\tfor i in range(pca_2d.shape[0]):\n\t\tif labels[i] == 0:\n\t\t\tc1 = pl.scatter(pca_2d[i,0], pca_2d[i,1], c='r')\n\t\telif labels[i] == 1:\n\t\t\tc2 = pl.scatter(pca_2d[i,0], pca_2d[i,1], c='g')\n\t\telif labels[i] == 2:\n\t\t\tc3 = pl.scatter(pca_2d[i,0], pca_2d[i,1], c='b')\n\t\telif labels[i] == 3:\n\t\t\tc4 = pl.scatter(pca_2d[i,0], pca_2d[i,1], c='c')\n\t\telif labels[i] == 4:\n\t\t\tc5 = pl.scatter(pca_2d[i,0], pca_2d[i,1], c='m')\n\t\telif labels[i] == 5:\n\t\t\tc6 = pl.scatter(pca_2d[i,0], pca_2d[i,1], c='y')\n\t\telif labels[i] == 6:\n\t\t\tc7 = pl.scatter(pca_2d[i,0], pca_2d[i,1], c='k')\n\t\telif labels[i] == 7:\n\t\t\tc8 = pl.scatter(pca_2d[i,0], pca_2d[i,1], c='orange')\n\t\tpl.annotate(styles[i], xy=(pca_2d[i,0], pca_2d[i,1]))\n\tpl.legend([c1, c2, c3, c4, c5, c6, c7, c8], ['1', '2', '3', '4', '5', '6', '7', '8'])\n\tpl.show()\n\n\n\nmain()" }, { "alpha_fraction": 0.5767731666564941, "alphanum_fraction": 0.590023398399353, "avg_line_length": 24.68000030517578, "blob_id": "4074595ed7e2ee35fc24458eedfbf4b067d10e8b", "content_id": "59a8b4e37f42428f2d51a0b34c2152d3b5a9dd7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1283, "license_type": "no_license", "max_line_length": 142, "num_lines": 50, "path": "/parse_data.py", "repo_name": "inaccare/Beer-Type-Prediction", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\n\ndata_path = '../ratebeer.txt'\n\ndef main():\n\twith open(data_path, encoding='utf-8') as f:\n\t\treviews = f.read().split('\\n\\n')\n\t\tparsed_data = []\n\t\tcategories = set()\n\t\tnum_reviews = len(reviews)\n\t\tfor num, r in enumerate(reviews):\n\t\t\tif num % 10000 == 0:\n\t\t\t\tprint(str(num) + '/' + str(num_reviews))\n\t\t\tdatum = []\n\t\t\tfeats = r.split('\\n')\n\t\t\tfor i, feat in enumerate(feats):\n\t\t\t\tvalues = feat.split(':')\n\t\t\t\tif len(values) < 2:\n\t\t\t\t\tvalue = 
'N/A'\n\t\t\t\telse:\n\t\t\t\t\tvalue = ':'.join(values[1:]).strip()\n\t\t\t\tif i == 4:\n\t\t\t\t\tcategories.add(value)\n\t\t\t\tdatum.append(value)\n\t\t\tparsed_data.append(datum)\n\n\t\tcolumns = ['name', 'beerId', 'brewerId', 'ABV', 'style', 'appearance', 'aroma', 'palate', 'taste', 'overall', 'time', 'profileName', 'text']\n\t\tdf = pd.DataFrame(parsed_data)\n\t\tdf.columns = columns\n\n\t\tarr = np.random.rand(len(df))\n\t\tmsk_train = arr < 0.9\n\t\tmsk_dev = arr > 0.95\n\t\tmsk_test = arr > 0.9\n\t\ttrain = df[msk_train]\n\t\tdev = df[msk_dev]\n\t\ttest = df[msk_test & ~msk_dev]\n\n\t\twith open('beers_train.csv', 'w') as out:\n\t\t train.to_csv(out)\n\t\tprint('Total number of categories: ' + str(len(categories)))\n\n\t\twith open('beers_dev.csv', 'w') as out:\n\t\t dev.to_csv(out)\n\n\t\twith open('beers_test.csv', 'w') as out:\n\t\t test.to_csv(out)\n\nmain()" }, { "alpha_fraction": 0.5919678211212158, "alphanum_fraction": 0.603921115398407, "avg_line_length": 41.730308532714844, "blob_id": "4cdfe43331832a21232006d410c459b9918fda45", "content_id": "6b0dd807efcab573405e4bc42b1e8b9a7070cfa9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17903, "license_type": "no_license", "max_line_length": 196, "num_lines": 419, "path": "/LSTM_model.py", "repo_name": "inaccare/Beer-Type-Prediction", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport re\nimport sys\nimport time\nimport math\nimport numpy as np\nnp.set_printoptions(threshold=np.nan)\nimport h5py\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nimport json\nimport nltk\nimport multiprocessing\nimport gensim\nfrom collections import defaultdict\nimport codecs\n\n\n\"\"\"\nThis model's architecture is as follows: product descriptions go into an LSTM layer, output of LSTM goes\nthrough a 6-layer FC network. 
Output is a softmax vector of length 89, corresponding to 89 distinct \nBeer types.\n\"\"\"\n\nw2v = None\nw2vFile = sys.argv[3]\nw2v = gensim.models.word2vec.Word2Vec.load(w2vFile)\n\nwordToIndex = dict()\nindexToEmb = dict()\nindexToWord = dict()\ncount = 1\nwordToIndex['padding'] = 0\nindexToWord[0] = 'padding'\nindexToEmb[0] = np.random.randn(100)\nfor w in w2v.wv.vocab:\n indexToWord[count] = w\n wordToIndex[w] = count\n indexToEmb[count] = w2v.wv[w]\n count = count + 1\n\ndef main():\n # Usage is as follows: python model.py <train>.csv <dev>.csv <glove file> \n X_test = []\n Y_test = []\n testCSV = None\n trainCSV = None\n devCSV = None\n if (len(sys.argv) >=2):\n trainCSV = sys.argv[1]\n if (len(sys.argv) >= 4):\n devCSV = sys.argv[2]\n lr, ne, bs, tx = None, None, None, 400\n if (len(sys.argv) >= 5):\n lr, ne, bs, tx = getHyperparamsFromJSON(str(sys.argv[4]))\n\n styleToIndex, indexToStyle = buildStyleIndices()\n trainDF = pd.read_csv(trainCSV, header = 0, encoding='utf-8', dtype={'text': str})\n rand_arr = np.random.rand(len(trainDF))\n msk_train = rand_arr < 0.01\n trainDF = trainDF[msk_train]\n # For each entry in X_train, we have an array of length T_x with each entry\n # corresponding to an index into the word's w2v embedding\n X_train, Y_train = getReviewIndicesAndStyles(trainDF, tx, styleToIndex)\n devDF = pd.read_csv(devCSV, header = 0, encoding='utf-8')\n rand_arr = np.random.rand(len(devDF))\n msk_dev = rand_arr > .999\n devDF = devDF[msk_dev]\n X_dev, Y_dev = getReviewIndicesAndStyles(devDF, tx, styleToIndex)\n print (\"X_train shape: \" + str(X_train.shape))\n print (\"Y_train shape: \" + str(Y_train.shape))\n print (\"X_dev shape: \" + str(X_dev.shape))\n print (\"Y_dev shape: \" + str(Y_dev.shape))\n if (lr == None):\n model(X_train, Y_train, X_dev, Y_dev, styleDict=indexToStyle)\n else:\n model(X_train, Y_train, X_dev, Y_dev, learning_rate = lr, num_epochs = ne, mini_batch_size = bs, Tx = tx, styleDict=indexToStyle)\n\ndef buildStyleIndices():\n styleDict = {}\n indexToStyleDict = {}\n df = pd.read_csv(\"beers_train.csv\", header = 0, encoding='utf-8')\n styles = df['style'].unique()[:-1]\n for i, s in enumerate(styles):\n if str(s) not in styleDict:\n styleDict[str(s)] = i\n indexToStyleDict[i] = str(s)\n return styleDict, indexToStyleDict\n\ndef cleanStyles(styles):\n new_styles = []\n for s in styles:\n correct_string = s.replace('\\xc3\\xa9', 'e').replace('\\xc3\\xb6', 'o').replace('&#40;IPA&#41;', '').replace('&#40;Witbier&#41;', '').replace('\\xc3\\xa4', 'a').replace('\\xc3\\xa8', 'e').strip()\n new_styles.append(correct_string)\n return new_styles\n\ndef getReviewIndicesAndStyles(df, T_x, styleToIndex):\n X = []\n Y = []\n numBuckets = 89\n for i, row in df.iterrows():#len(df['item_description'])):\n if (pd.isnull(row['text']) == False): # Checks for Nan descriptions\n X.append(getIndexArrForSentence(row['text'], T_x))\n else:\n X.append(np.zeros(T_x))\n Y.append(OneHot(styleToIndex[str(row['style'])], 89))\n Y = np.array(Y).T\n X = np.array(X).T\n return X, Y\n\ndef getIndexArrForSentence(sentence, T_x):\n arr = np.zeros(T_x)\n words = nltk.word_tokenize(sentence.lower())\n count = 0\n for w in words:\n # Only looking at first 400 words!\n if (count == T_x):\n break\n if w in w2v.wv.vocab:\n arr[count] = wordToIndex[w]\n count = count + 1\n return arr\n\ndef getSentenceForIndexArr(arr):\n sentence = []\n for i in arr:\n sentence.append(indexToWord[int(i)])\n return sentence\n\ndef OneHot(bucket, numBuckets):\n \"\"\"\n Creates onehot vector for our Y output\n 
Arguments:\n bucket -- index of correct bucket for example in Y\n numBuckets -- number of buckets used to split of the prices of objects\n Returns:\n arr -- onehot array\n \"\"\"\n arr = np.zeros(numBuckets)\n arr[bucket] = 1\n return arr\n\n# ==========\n# MODEL\n# ==========\ndef model(X_train, Y_train, X_dev, Y_dev, learning_rate = 0.01, num_epochs = 3,\n mini_batch_size = 128, Tx = 400, display_step = 1, n_hidden = 64, styleDict={}):\n # Shape of X: (m, Tx, n_x)??? Emmie please check this\n # Shape of Y: (n_y, m)\n print (\"Model has following hyperparameters: learning rate: \" + str(learning_rate) + \", num_epochs: \" + str(num_epochs) + \", mini_batch_size: \" \\\n + str(mini_batch_size) + \", Tx: \"+ str(Tx) + \".\")\n\n # hidden layer num of features\n n_y = 89 # num categories\n n_x = 100 # w2v length\n\n # tf Graph input\n X = tf.placeholder(\"float\", [None, Tx, n_x])\n Y = tf.placeholder(\"float\", [n_y, None])\n # A placeholder for indicating each sequence length\n #Tx = tf.placeholder(tf.int32, [None])\n\n # Define weights\n weights = {\n # 'out': tf.Variable(tf.random_normal([n_hidden, n_y]))\n 'W_1' : tf.get_variable('W_1',[n_hidden,n_hidden], initializer = tf.contrib.layers.xavier_initializer(seed = 1)),\n 'W_2' : tf.get_variable('W_2',[n_hidden,n_hidden], initializer = tf.contrib.layers.xavier_initializer(seed = 1)),\n 'W_out' : tf.get_variable('W_out',[n_hidden, n_hidden], initializer = tf.contrib.layers.xavier_initializer(seed = 1)),\n 'W_f1' : tf.get_variable('W_f1',[n_hidden, n_hidden], initializer = tf.contrib.layers.xavier_initializer(seed = 1)),\n 'W_f2' : tf.get_variable('W_f2',[n_hidden,n_hidden], initializer = tf.contrib.layers.xavier_initializer(seed = 1)),\n 'W_fout' : tf.get_variable('W_fout',[n_hidden,n_y], initializer = tf.contrib.layers.xavier_initializer(seed = 1))\n }\n biases = {\n # 'out': tf.Variable(tf.random_normal([n_y]))\n 'b_1' : tf.get_variable('b_1',[n_hidden], initializer = tf.zeros_initializer()),\n 'b_2' : tf.get_variable('b_2',[n_hidden], initializer = tf.zeros_initializer()),\n 'b_out' : tf.get_variable('b_out',[n_hidden], initializer = tf.zeros_initializer()),\n 'b_f1' : tf.get_variable('b_f1',[n_hidden], initializer = tf.zeros_initializer()),\n 'b_f2' : tf.get_variable('b_f2',[n_hidden], initializer = tf.zeros_initializer()),\n 'b_fout' : tf.get_variable('b_fout',[n_y], initializer = tf.zeros_initializer())\n }\n\n lstm_output = dynamicRNN(X, Tx, weights, biases, n_x, n_hidden)\n tf.nn.dropout(lstm_output, 0.1)\n U = tf.get_variable(\"U\", shape=[n_hidden * 2, n_y], initializer=tf.contrib.layers.xavier_initializer())\n b_last = tf.get_variable(\"b_last\", shape=[1, n_y], initializer=tf.zeros_initializer())\n pred = tf.matmul(lstm_output, U) + b_last\n# pred = tf.Print(pred, [pred], message=\"This is pred: \", summarize=88)\n # Define loss and optimizer\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = pred, labels = tf.transpose(Y)))\n\n # derivative wrt to each embedding\n dc_de = tf.gradients(cost, X)\n sc = tf.linalg.norm(dc_de[0], axis=2)\n\n\n optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)\n\n # Evaluate model\n predicted_vals = tf.argmax(pred, 1)\n actual_vals = tf.argmax(tf.transpose(Y), 1)\n\n correct_pred = tf.equal(predicted_vals, actual_vals) #Argmax over columns\n num_correct = tf.reduce_sum(tf.cast(correct_pred, tf.float32), name = \"num_correct\")\n\n # Initialize the variables (i.e. 
assign their default value)\n init = tf.global_variables_initializer()\n\n # Initialize the saver\n saver = tf.train.Saver()\n\n m = Y_train.shape[1]\n num_minibatches = int(math.floor(m/mini_batch_size))\n # Start training\n with tf.Session() as sess:\n # Run the initializer\n sess.run(init)\n for step in range(1, num_epochs + 1):\n epoch_cost =0\n tot_num_correct = 0\n # extract each miniminibatch_X, miniBatch_Y at each\n #make minimatches here (randomly shuffling across m)\n minibatches = random_mini_batches(X_train, Y_train, mini_batch_size = mini_batch_size, seed = 0)\n for minibatch in minibatches:\n (minibatch_X, minibatch_Y) = minibatch\n # Expand mininminibatch_X \n minibatch_X = miniBatchIndicesToEmbedding(minibatch_X, Tx)# print (\"Shape of minibatch_X is \" + str(minibatch_X.shape))\n sess.run(optimizer, feed_dict={X: minibatch_X, Y: minibatch_Y})\n mini_num_correct, loss = sess.run([num_correct, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})\n epoch_cost = epoch_cost + loss\n tot_num_correct = tot_num_correct + mini_num_correct\n # Tx: Tx})\n if step % display_step == 0 or step == 1:\n # Calculate batch accuracy & loss\n #Tx: Tx})\n print(\"Epoch \" + str(step) + \", Cost= \" + \\\n \"{:.6f}\".format(epoch_cost/num_minibatches) + \", Training Accuracy= \" + \\\n \"{:.5f}\".format(float(tot_num_correct/m)))\n\n print(\"Optimization Finished!\")\n train_num_correct = 0\n minibatches = random_mini_batches(X_train, Y_train, mini_batch_size = mini_batch_size, seed = 0)\n for minibatch in minibatches:\n (minibatch_X, minibatch_Y) = minibatch\n minibatch_X = miniBatchIndicesToEmbedding(minibatch_X, Tx)\n num_correct_mb, loss = sess.run([num_correct, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})\n train_num_correct = train_num_correct + num_correct_mb\n print(\"Accuracy for train set: \"+ str(train_num_correct/X_train.shape[1]))\n\n dev_num_correct = 0\n minibatches = random_mini_batches(X_dev, Y_dev, mini_batch_size = mini_batch_size, seed = 0)\n\n f = open('saliency.json', 'w')\n f.write('[\\n')\n for minibatch in minibatches:\n (minibatch_X_ind, minibatch_Y) = minibatch\n minibatch_X = miniBatchIndicesToEmbedding(minibatch_X_ind, Tx)\n minibatch_sent = []\n for m in minibatch_X_ind.T:\n minibatch_sent.append(getSentenceForIndexArr(m))\n num_correct_mb, pr, ac, loss, saliency = sess.run([num_correct, predicted_vals, actual_vals, cost, sc], feed_dict={X: minibatch_X, Y: minibatch_Y})\n predicted = []\n actual = []\n for i in range(pr.shape[0]):\n predicted.append(styleDict[pr[i]])\n actual.append(styleDict[ac[i]])\n predicted = cleanStyles(predicted)\n actual = cleanStyles(actual)\n saliency_chart = list(zip(minibatch_sent, saliency))\n zipped = []\n for ex in saliency_chart:\n zipped.append(list(zip(ex[0], ex[1].tolist())))\n for i in range(len(predicted)):\n f.write('\\t{\\n')\n f.write('\\t\\t\\\"predicted\\\": \\\"' + predicted[i] + '\\\",\\n')\n f.write('\\t\\t\\\"actual\\\": \\\"' + actual[i] + '\\\",\\n')\n f.write('\\t\\t\\\"saliency\\\": ' + json.dumps(zipped[i]) + '\\n')\n f.write('\\t},\\n')\n\n dev_num_correct = dev_num_correct + num_correct_mb\n f.write(']\\n')\n f.close()\n\n\n print(\"Accuracy for dev set: \"+ str(dev_num_correct/X_dev.shape[1]))\n saver.save(sess, './LSTM_model')\n sess.close()\n # # Calculate accuracy\n # test_data = testset.data\n # test_label = testset.labels\n # test_Tx = testset.Tx\n # print(\"Testing Accuracy:\", \\\n # sess.run(accuracy, feed_dict={X: test_data, Y: test_label,\n # Tx: test_Tx}))\n\ndef dynamicRNN(X, Tx, weights, 
biases, n_x, n_hidden):\n\n # Prepare data shape to match `rnn` function requirements\n # Current data input shape: (m, Tx, n_x)\n # Required shape: 'n_steps' tensors list of shape (batch_size, n_input); or Tx tensors of shape (m, n_x)\n\n # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input)\n # X = tf.unstack(X, Tx, 1) #Unstack to be (None, 100) vectors\n # Define a lstm cell with tensorflow\n lstm_cell_fw = tf.contrib.rnn.BasicLSTMCell(n_hidden)\n lstm_cell_bw = tf.contrib.rnn.BasicLSTMCell(n_hidden)\n\n # Get lstm cell output, providing 'sequence_length' will perform dynamic\n # calculation.\n # Z_out, c = tf.contrib.rnn.static_rnn(lstm_cell, X, dtype=tf.float32)\n _, output = tf.nn.bidirectional_dynamic_rnn(lstm_cell_fw, lstm_cell_bw, X, dtype=tf.float32)\n output = (output[0][1], output[1][1])\n lstm_output = tf.concat(output, 1)\n #sequence_length=Tx)\n\n # When performing dynamic calculation, we must retrieve the last\n # dynamically computed output, i.e., if a sequence length is 10, we need\n # to retrieve the 10th output.\n # However TensorFlow doesn't support advanced indexing yet, so we build\n # a custom op that for each sample in batch size, get its length and\n # get the corresponding relevant output.\n\n # 'Z_out' is a list of output at every timestep, we pack them in a Tensor\n # and change back dimension to [batch_size, n_step, n_input]\n # Z_out = tf.stack(Z_out)\n # Z_out = tf.transpose(Z_out, [1, 0, 2])\n\n # # Hack to build the indexing and retrieve the right output.\n # batch_size = tf.shape(Z_out)[0]\n # # Start indices for each sample\n # index = tf.range(0, batch_size) * Tx + (Tx - 1)\n # # Indexing\n # Z_out = tf.gather(tf.reshape(Z_out, [-1, n_hidden]), index)\n # # Deepen LSTM network with fully connected\n # Z_out = tf.matmul(Z_out, weights['W_1']) + biases['b_1']\n # Z_out = tf.nn.relu(Z_out)\n # dropout = tf.layers.dropout(inputs=Z_out, rate=0.1) # Dropout 10% of units\n # Z_out = tf.matmul(Z_out, weights['W_2']) + biases['b_2']\n # Z_out = tf.nn.relu(Z_out)\n # dropout = tf.layers.dropout(inputs=Z_out, rate=0.1)\n # Z_out = tf.matmul(dropout, weights['W_out']) + biases['b_out']\n # Z_out = tf.nn.relu(Z_out) \n\n # # Deepen Full Network\n # Z_out = tf.matmul(Z_out, weights['W_f1']) + biases['b_f1']\n # Z_out = tf.nn.relu(Z_out)\n # Z_out = tf.layers.dropout(inputs=Z_out, rate=0.1) # Dropout 10% of units\n # Z_out = tf.matmul(Z_out, weights['W_f2']) + biases['b_f2']\n # Z_out = tf.nn.relu(Z_out)\n # Z_out = tf.layers.dropout(inputs=Z_out, rate=0.1)\n # Z_out = tf.matmul(Z_out, weights['W_fout']) + biases['b_fout']\n\n # Linear activation, using Z_out computed above\n return lstm_output\n\n\n# =================================================================\n# Helper functions for reading in data/getting minibatches ready\n# =================================================================\ndef getHyperparamsFromJSON(filename):\n parameters = None\n with open(filename, 'r') as fp:\n parameters = json.load(fp)\n return float(parameters['learning_rate']), int(parameters['num_epochs']), int(parameters['batch_size']), int(parameters['Tx'])\n\ndef miniBatchIndicesToEmbedding(minibatch_X, T_x):\n m = minibatch_X.shape[1]# Maximum number of time steps (zero-padded)\n n_x = 100 # Length of words2vec vector for each word\n newArr = np.zeros((m, T_x, n_x))\n for i in range(m): # Iterating through samples\n for j in range(T_x): # Iterating through words\n indexToW2v = 0\n indexToW2v = minibatch_X[j,i]\n if indexToW2v != 0:\n newArr[i,j,:] = 
np.array(indexToEmb[indexToW2v])\n return newArr\n\ndef random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):\n \"\"\"\n Creates a list of random minibatches from (X, Y)\n\n Arguments:\n X -- input data, of shape (number of examples, Tx)\n Y -- true \"label\" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)\n mini_batch_size - size of the mini-batches, integer\n seed -- this is only for the purpose of grading, so that you're \"random minibatches are the same as ours.\n\n Returns:\n mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)\n \"\"\"\n m = Y.shape[1] # number of training examples\n mini_batches = []\n np.random.seed(seed)\n # Step 1: Shuffle (X, Y) X shape: (Tx, m) Y shape: (n_y, m)\n permutation = list(np.random.permutation(m))\n print(\"shape X\", X.shape)\n shuffled_X = X[:, permutation]\n shuffled_Y = Y[:, permutation].reshape((Y.shape[0],m)) # not sure why we need to reshape here\n\n # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.\n num_complete_minibatches = int(math.floor(m/mini_batch_size)) # number of mini batches of size mini_batch_size in your partitionning\n for k in range(0, num_complete_minibatches):\n mini_batch_X = shuffled_X[:, k * mini_batch_size : k * mini_batch_size + mini_batch_size]\n mini_batch_Y = shuffled_Y[:, k * mini_batch_size : k * mini_batch_size + mini_batch_size]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n\n # Handling the end case (last mini-batch < mini_batch_size)\n if m % mini_batch_size != 0:\n mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size : m]\n mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size : m]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n\n return mini_batches\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.6500765681266785, "alphanum_fraction": 0.6669219136238098, "avg_line_length": 21.912281036376953, "blob_id": "3da1e591ec096dc51c76af12775cafa606091d69", "content_id": "ad9fa94282fa058308694e38aa6037fd7a43369c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1306, "license_type": "no_license", "max_line_length": 62, "num_lines": 57, "path": "/baseline.py", "repo_name": "inaccare/Beer-Type-Prediction", "src_encoding": "UTF-8", "text": "\nimport nltk\nimport gensim\nimport pandas as pd\nimport numpy as np\nimport sys\nimport codecs\nimport math\n\nfrom sklearn.linear_model import LogisticRegression\n\n\ndef average_vecs(w2v, filename):\n\ttokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\n\twith open(filename) as f:\n\t\tdf = pd.read_csv(f, dtype={'text': str})\n\t\tsamples = []\n\t\tlabels = []\n\t\treviews = df['text']\n\t\tnum_rows = len(df)\n\t\tfor i, review in enumerate(reviews):\n\t\t\tif i % 1000 == 0:\n\t\t\t\tprint str(i) + '/' + str(num_rows)\n\t\t\tif type(review) is not str:\n\t\t\t\tcontinue\n\t\t\tlabels.append(df.iloc[i]['style'])\n\t\t\tsummed = np.zeros((100))\n\t\t\treview_tokenized = nltk.word_tokenize(review.lower())\n\t\t\tfor token in review_tokenized:\n\t\t\t\tif token in w2v.wv.vocab:\n\t\t\t\t\tsummed += w2v[token]\n\t\t\tsummed /= num_rows\n\t\t\tsamples.append(summed)\n\t\treturn np.array(samples), labels\n\n\ndef main():\n\tw2vFile = sys.argv[1]\n\tw2v = gensim.models.word2vec.Word2Vec.load(w2vFile)\n\ttrain_x, train_y = average_vecs(w2v, 'beers_train.csv')\n\tdev_x, dev_y = average_vecs(w2v, 'beers_dev.csv')\n\t\n\tbaseline = 
LogisticRegression()\n\tbaseline.fit(train_x, train_y)\n\ty_hat = baseline.predict(dev_x)\n\n\tcorrect = 0.0\n\ttotal = len(y_hat)\n\n\tfor i in range(len(y_hat)):\n\t\tif dev_y[i] == y_hat[i]:\n\t\t\tcorrect += 1\n\tacc = correct / total\n\tprint 'accuracy: ' + str(acc)\n\n\n\nmain()" }, { "alpha_fraction": 0.7968923449516296, "alphanum_fraction": 0.8057713508605957, "avg_line_length": 127.71428680419922, "blob_id": "db499fe278aaf436158a96005a61bb8621832396", "content_id": "d92c474c86d4fe695ad7976c8fb3aaf31f902e5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 901, "license_type": "no_license", "max_line_length": 669, "num_lines": 7, "path": "/README.md", "repo_name": "inaccare/Beer-Type-Prediction", "src_encoding": "UTF-8", "text": "# Beer-Type-Prediction\nPresentation at: https://www.youtube.com/watch?v=HazZTgU0RO8&t=26s\n\nProject Overview:\nWe built a Bi-Directional LSTM model to classify beers by type (i.e. IPA, Imperial Stout, etc.) based on reviews in the RateBeer dataset. After achieving > 44% accuracy with this model of 89 different categories of beer, we used First Derivative Saliency to make our model interpretable. By doing this, we were able to determine which words were most heavily weighted during classification through our saliency heatmaps. For example, for a fruit beer, the words \"lemons\" and \"candy\" were weighted most heavily. We believe other applications of this model could be in attempting to determine tasting notes in various wines or in a similar manner with restaurant reviews.\n\nPlease read paper \"Classifying Beer Styles Using an Interpretable NLP Model\" included for in depth discussion of project\n" }, { "alpha_fraction": 0.6869731545448303, "alphanum_fraction": 0.7022988796234131, "avg_line_length": 27.69230842590332, "blob_id": "9a6c140ff8f0c13817d6f707c06ef1ef6308ca51", "content_id": "7d74d6838c76ff2e3f4c4d3a8e43d6b59aa19531", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2610, "license_type": "no_license", "max_line_length": 114, "num_lines": 91, "path": "/build_w2v.py", "repo_name": "inaccare/Beer-Type-Prediction", "src_encoding": "UTF-8", "text": "from collections import Counter\nimport numpy as np\nimport pandas as pd\nimport re\nimport sys\nimport nltk\nimport codecs\nimport multiprocessing\nimport gensim\n\ndef clean_and_split_str(string):\n strip_special_chars = re.compile(\"[^A-Za-z]+\")\n string = re.sub(strip_special_chars, \" \", string)\n return string.strip().split()\n\ndef main():\n\tif len(sys.argv) > 3 or len(sys.argv) < 2:\n\t\traise Exception(\"usage: python build_w2v.py ratebeer.csv <num_dim>\")\n\n\tnum_dim = 100\n\tif len(sys.argv) == 3:\n\t\tnum_dim = sys.argv[2]\n\n\tinputfile = sys.argv[1]\n\traw_df = pd.read_csv(inputfile, header = 0)\n\tprint(\"data loaded\")\n\n\traw_corpus = u\"\".join(raw_df['review/text'].astype(str) + \" \")\n\tprint(\"Raw Corpus contains {0:,} words\".format(len(raw_corpus.split())))\n\n\ttokenizer = nltk.data.load(\"tokenizers/punkt/english.pickle\")\n\tprint(\"The punkt tokenizer is loaded\")\n\traw_sentences = tokenizer.tokenize(raw_corpus)\n\tprint(\"We have {0:,} raw sentences\".format(len(raw_sentences)))\n\n\tsentences = []\n\tfor raw_sent in raw_sentences:\n\t\tif len(raw_sent) > 0:\n\t\t\tsentences.append(clean_and_split_str(raw_sent.lower()))\n\tprint(\"We have {0:,} clean sentences\".format(len(sentences)))\n\n\ttoken_count = sum([len(sentence) for sentence in 
sentences])\n\tprint(\"The dataset corpus contains {0:,} tokens\".format(token_count))\n\n\t#Dimensionality of the resulting word vectors\n\tnum_features = num_dim\n\t#Minimum word count threshold\n\tmin_word_count = 2\n\t#Number of threads to run in parallel\n\tnum_workers = multiprocessing.cpu_count() \n\t#Context window length\n\tcontext_size = 7\n\t#Seed for the RNG, to make the result reproducible\n\tseed = 1\n\t\n\tword2vec_model = gensim.models.word2vec.Word2Vec(\n sg=1,\n seed=seed,\n workers=num_workers, \n size=num_features, \n min_count=min_word_count, \n window=context_size)\n\t \n\tword2vec_model.build_vocab(sentences=sentences)\n\tprint(\"The vocabulary is built\")\n\tprint(\"Word2Vec vocabulary length: \", len(word2vec_model.wv.vocab))\n\t \n\t#Start training the model\n\tword2vec_model.train(sentences=sentences, total_examples=word2vec_model.corpus_count, epochs=word2vec_model.iter)\n\tprint(\"Training finished\")\n\n\tword2vec_model.save(\"Beer_reviews_trained.w2v\")\n\tprint(\"Model saved\")\n\n\tw2v_model = gensim.models.word2vec.Word2Vec.load(\"Beer_reviews_trained.w2v\")\n\tprint(\"Model loaded\")\n\n\tcounter = 0\n\tfor word in w2v_model.wv.vocab:\n\t\tprint(\"word: \", word)\n\t\tprint(w2v_model.wv[word])\n\t\tcounter += 1\n\t\tif counter > 10:\n\t\t\tbreak\n\n\t# df_des = df['item_description']\n\t# df_des.to_csv('pandas.txt', header=None, index=None, sep=' ', mode='a', escapechar=\" \")\n\treturn\n\nif __name__ == \"__main__\":\n\tmain()" }, { "alpha_fraction": 0.6259258985519409, "alphanum_fraction": 0.6555555462837219, "avg_line_length": 18.321428298950195, "blob_id": "78dc52c80b7d424fb8d93e0505164dbfa7ea6307", "content_id": "cf4e8b4542805af846d17219eed2b50c37a5d822", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 540, "license_type": "no_license", "max_line_length": 42, "num_lines": 28, "path": "/get_lengths.py", "repo_name": "inaccare/Beer-Type-Prediction", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport json\nimport pandas as pd\n\n\nbins = 30\nbeer_file = 'beers.csv'\n\nx = []\n\nwith open(beer_file) as f:\n\tdf = pd.read_csv(f)\n\tlength = len(df['text'])\n\tfor i, row in enumerate(df['text'][:-1]):\n\t\tif i % 100000:\n\t\t\tprint str(i) + '/' + str(length)\n\t\tx.append(len(row))\n\n\tvals = np.asarray(x, dtype=np.int32)\n\n\tplt.xlabel('Tokens')\n\tplt.ylabel('Number of Reviews')\n\tplt.title('Beer Review Lengths')\n\tplt.hist(vals, bins, range=[0, 1500])\n\t#plt.show()\n\tplt.savefig('review_len.png')" } ]
7
noahdesu/ucsc-transit
https://github.com/noahdesu/ucsc-transit
d70120e6a6b9e25c615d04d78650d68bf5ad909a
d5b6bc81e4af4c843dd467eb7044027078d58ffb
e22c8eec86a438d3caeed8ef6596ba909c4a2653
refs/heads/master
2021-01-21T09:53:55.404844
2017-02-27T19:43:58
2017-02-27T20:08:39
83,350,221
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5744680762290955, "alphanum_fraction": 0.7446808218955994, "avg_line_length": 22.5, "blob_id": "8a2c4a803106c7a263dcdd51ad58847e3f998d9f", "content_id": "933374d6111794ccc5d632806e3d996ad9c73154", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 47, "license_type": "no_license", "max_line_length": 29, "num_lines": 2, "path": "/requirements.txt", "repo_name": "noahdesu/ucsc-transit", "src_encoding": "UTF-8", "text": "requests==2.13.0\ngoogle-cloud-bigquery==0.23.0\n" }, { "alpha_fraction": 0.5437997579574585, "alphanum_fraction": 0.5506256818771362, "avg_line_length": 26.46875, "blob_id": "35c2e83b4d1d41ddbcc26d4ea6866b7ecf338210", "content_id": "8868df05c0629cb279268b3642cd8784a4114714", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 879, "license_type": "no_license", "max_line_length": 65, "num_lines": 32, "path": "/sample.py", "repo_name": "noahdesu/ucsc-transit", "src_encoding": "UTF-8", "text": "import time\nimport datetime\nimport requests\nfrom google.cloud import bigquery\n\nbigquery_client = bigquery.Client()\ndataset = bigquery_client.dataset('locations')\ntable = dataset.table('samples')\n\ntable.reload()\n\nwhile True:\n try:\n r = requests.get('http://bts.ucsc.edu:8081/location/get')\n timestamp = datetime.datetime.utcnow()\n rows = []\n for row in r.json():\n rows.append((timestamp, row['id'], row['lat'], \\\n row['lon'], row['type']))\n if len(rows) > 0:\n errors = table.insert_data(rows)\n if errors:\n msg = \"error {}\".format(errors)\n print msg\n else:\n print 'loaded {} rows'.format(len(rows))\n except Exception as err:\n msg = \"error {}\".format(err)\n print msg\n continue\n finally:\n time.sleep(2)\n" } ]
2
LikeLionSCH/github-practice-A-team
https://github.com/LikeLionSCH/github-practice-A-team
f9ceeb4b5d72f25372a79571fdf51e85e96aed18
cd37f52179aeafea96d7ec2dfcfaa062d7aa5a05
e9d4be9f2eb3ce42ee2d0628a6ee449303d041c4
refs/heads/master
2022-11-18T13:49:38.681935
2020-07-22T11:50:26
2020-07-22T11:50:26
281,620,668
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.739130437374115, "alphanum_fraction": 0.782608687877655, "avg_line_length": 23, "blob_id": "8cb388cc310b74c2c539161b552f9b12d8bcf882", "content_id": "e5da2402e570fa9fa71e44d30aadb13933ae1378", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23, "license_type": "no_license", "max_line_length": 23, "num_lines": 1, "path": "/branch_tw.py", "repo_name": "LikeLionSCH/github-practice-A-team", "src_encoding": "UTF-8", "text": "print(\"taewan branch1\")" }, { "alpha_fraction": 0.5555555820465088, "alphanum_fraction": 0.5555555820465088, "avg_line_length": 18, "blob_id": "3e3450b906929927d39e7e69c7c8031444b51cfb", "content_id": "26ddfcc4eb908959d8a1250eefff73ea517e0b0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18, "license_type": "no_license", "max_line_length": 18, "num_lines": 1, "path": "/yulhee.py", "repo_name": "LikeLionSCH/github-practice-A-team", "src_encoding": "UTF-8", "text": "print(\"sorry!!!!\")" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 12, "blob_id": "c80c94fb4a9912e3231ca7348ca716bd6a8c5e54", "content_id": "46ee8fc2cce809405c1f3e348acd9f9b1c271656", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18, "license_type": "no_license", "max_line_length": 12, "num_lines": 1, "path": "/tw.py", "repo_name": "LikeLionSCH/github-practice-A-team", "src_encoding": "UTF-8", "text": "print(\"화이팅\")" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 28, "blob_id": "f128d4ce252f9000a9177516479b2514ea8c9100", "content_id": "b263ead839c80db8fe120cea0180d7b91db5448c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 28, "license_type": "no_license", "max_line_length": 28, "num_lines": 1, "path": "/newwhite.py", "repo_name": "LikeLionSCH/github-practice-A-team", "src_encoding": "UTF-8", "text": "print(\"I made a new branch\")" }, { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 22, "blob_id": "c09234357086b8c0aecd866fe840b32cdfc8cbbc", "content_id": "8492c2f2374f05b1f9905a355855eb42ed141e46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22, "license_type": "no_license", "max_line_length": 22, "num_lines": 1, "path": "/white.py", "repo_name": "LikeLionSCH/github-practice-A-team", "src_encoding": "UTF-8", "text": "print(\"Hi. I'm White\")" } ]
5
pkmollman/advent_2020
https://github.com/pkmollman/advent_2020
848834a968431211ad168083e7cb66bf0b0b6dd7
d9a84e907dc67abdb97e7dc86109068be4b8080e
f828734421d54b2764e7760123993b35f10a5764
refs/heads/main
2023-01-30T01:41:36.570906
2020-12-11T06:38:18
2020-12-11T06:38:18
317,447,825
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6397820115089417, "alphanum_fraction": 0.6583106517791748, "avg_line_length": 27.671875, "blob_id": "3b03ece4945fd921495d80e8fb8978b1dbffe631", "content_id": "8150fe35ffa4e7428001a5f4003f3df13e9fc970", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1835, "license_type": "no_license", "max_line_length": 106, "num_lines": 64, "path": "/8/5.py", "repo_name": "pkmollman/advent_2020", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nfrom multiprocessing import Pool\n\nadapters = []\n\n# with open('input.txt', 'r') as f:\n# adapters = [int(line.strip()) for line in f.readlines()]\n\nwith open('testinput1.txt', 'r') as f:\n adapters = [int(line.strip()) for line in f.readlines()]\n\nadapters.append(0)\nadapters.sort()\nadapters.append(adapters[-1]+3)\n\nadapters_r = adapters[::-1]\n\ncount_one_gap = 1\ncount_three_gap = 1\n\nglobal g_counter\ng_counter = 0\n\nmain_list = []\n\ndef options(i, slices):\n return [ num for num in slices if (num - i) <= 3 ]\n\ndef options_r(i, adapters, adapter, sliced):\n global g_counter\n for nummy,option in enumerate([ num for num in sliced if (num - adapter) <= 3 ]):\n if option == adapters[-1]:\n g_counter += 1\n option_index = i + nummy + 1\n options_r(option_index,adapters,option,adapters[option_index+1:option_index+4])\n\ndef options_rb(i, adapters, adapter, sliced):\n global g_counter\n for nummy,option in enumerate([ num for num in sliced if (adapter - num) <= 3 ]):\n if option == adapters[-1]:\n g_counter += 1\n option_index = i + nummy + 1\n options_rb(option_index,adapters,option,adapters[option_index+1:option_index+4])\n\n\n\nadapt_len = len(adapters)\nadapt_mid = int(adapt_len//2)\n\nmid_starts = [adapt_mid-1,adapt_mid,adapt_mid+1]\nback_starts = [adapters_r.index(adapters[num-3]) for num in mid_starts]\nprint(back_starts)\nprint(mid_starts)\n# for start in mid_starts:\n# options_r(start, adapters, adapters[start],adapters[start+1:start+4])\n\nfor start in back_starts:\n options_rb(start, adapters_r, adapters_r[start],adapters_r[start+1:start+4])\n\nprint(g_counter)\n# def thread_i_guess(ind,v):\n# option_index = i + ind + 1\n# thingy += options_r3(option_index,adapters,option,adapters[option_index+1:option_index+4],g_counter)\n" }, { "alpha_fraction": 0.5581947565078735, "alphanum_fraction": 0.5724465847015381, "avg_line_length": 34.16666793823242, "blob_id": "7f4b62de6614fe4abdc3fd76344359bfc285210b", "content_id": "2caf7374b8f48b112e07751e3becdd37007c7d43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 421, "license_type": "no_license", "max_line_length": 79, "num_lines": 12, "path": "/2/1.py", "repo_name": "pkmollman/advent_2020", "src_encoding": "UTF-8", "text": "passwd_list = []\nwith open('input.txt','r') as f:\n for line in f.readlines():\n clean_line = line.strip().split(' ')\n min_max = clean_line[0].split('-')\n letter = clean_line[1][0]\n passwd = clean_line[2]\n passwd_count = passwd.count(letter)\n if passwd_count >= int(min_max[0]) and passwd_count <= int(min_max[1]):\n passwd_list.append(passwd)\n\nprint(len(passwd_list))" }, { "alpha_fraction": 0.5519630312919617, "alphanum_fraction": 0.570438802242279, "avg_line_length": 35.16666793823242, "blob_id": "7c5b876f932d71837df219a93946e76197063266", "content_id": "3485ad4138e0b15ee3d4515ad69afbee9430cc88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 433, "license_type": 
"no_license", "max_line_length": 93, "num_lines": 12, "path": "/2/2.py", "repo_name": "pkmollman/advent_2020", "src_encoding": "UTF-8", "text": "passwd_list = []\nwith open('input.txt','r') as f:\n for line in f.readlines():\n clean_line = line.strip().split(' ')\n poses = clean_line[0].split('-')\n letter = clean_line[1][0]\n passwd = clean_line[2]\n passwd_count = passwd.count(letter)\n if bool(passwd[int(poses[0])-1] == letter) ^ bool(passwd[int(poses[1])-1] == letter):\n passwd_list.append(passwd)\n\nprint(len(passwd_list))" }, { "alpha_fraction": 0.5926460027694702, "alphanum_fraction": 0.615717351436615, "avg_line_length": 24.703702926635742, "blob_id": "14bcb77557262b3f933f469bdfba3edd246c88cd", "content_id": "b5eaffd9cbf02dbdb4f28247c4c237d0e2a52717", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1387, "license_type": "no_license", "max_line_length": 87, "num_lines": 54, "path": "/8/2.py", "repo_name": "pkmollman/advent_2020", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nadapters = []\n\nwith open('input.txt', 'r') as f:\n adapters = [int(line.strip()) for line in f.readlines()]\n\n\nadapters.append(0)\nadapters.sort()\nadapters.append(adapters[-1]+3)\n\ncount_one_gap = 1\ncount_three_gap = 1\n\nglobal g_counter\ng_counter = 0\n\nmain_list = []\n\ndef options(i, slices):\n return [ num for num in slices if (num - i) <= 3 ]\n\ndef options_r(i, adapters, adapter, sliced):\n global g_counter\n for option in [ num for num in sliced if (num - adapter) <= 3 ]:\n # print(option)\n if option == adapters[-1]:\n g_counter += 1\n main_list.append(option)\n # return g_counter\n option_index = adapters.index(option)\n options_r(option_index,adapters,option,adapters[option_index+1:option_index+4])\n\noption_multiplier = 1\n\nprint(options_r(0,adapters,adapters[0],adapters[1:4]))\nprint(main_list)\nprint(g_counter)\n\n# for i,adapter in enumerate(adapters):\n# if i == 0:\n# pass\n# elif adapter - adapters[i-1] == 1:\n# count_one_gap += 1\n# elif adapter - adapters[i-1] == 3:\n# count_three_gap += 1\n# print(adapter)\n# print(options(adapter, adapters[i+1:i+4]))\n# if len(options(adapter, adapters[i+1:i+4])) > 0:\n# option_multiplier *= len(options(adapter, adapters[i+1:i+4]))\n\n# print(count_one_gap * count_three_gap)\n# print(option_multiplier)" }, { "alpha_fraction": 0.5175023674964905, "alphanum_fraction": 0.5373699069023132, "avg_line_length": 22.46666717529297, "blob_id": "c610febfee4436fbf00a3b3ba613110fb2ad2fdc", "content_id": "306df50ebbdf7f69930e69c37cc69d8b8b2a3079", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1057, "license_type": "no_license", "max_line_length": 66, "num_lines": 45, "path": "/5/2.py", "repo_name": "pkmollman/advent_2020", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\ndef seat_id(seat):\n\n row_max = 128\n row_min = 0\n row_nums = list(range(row_min,row_max))\n\n for char in seat[:7]:\n if char == 'F':\n row_nums = row_nums[:(len(row_nums))//2]\n continue\n row_nums = row_nums[int((len(row_nums))//2):]\n # print(row_nums)\n\n column_max = 8\n column_min = 0\n column_nums = list(range(column_min,column_max))\n\n for char in seat[-3:]:\n if char == 'L':\n column_nums = column_nums[:int((len(column_nums))//2)]\n continue\n column_nums = column_nums[int((len(column_nums))//2):]\n # print(column_nums)\n return row_nums[0] * 8 + column_nums[0]\n\nseat_ids = []\n\nwith open('input.txt', 'r') as f:\n for line in 
f.readlines():\n seat_ids.append(seat_id(line.strip()))\n # print(line)\n\nseat_ids.sort()\nprint(seat_ids[-1])\n\nseat_ids.sort()\n\nfor seat in range(len(seat_ids)):\n if seat == 0:\n pass\n else:\n if seat_ids[seat-1] != seat_ids[seat]-1:\n print(seat_ids[seat]-1)\n\n" }, { "alpha_fraction": 0.5655339956283569, "alphanum_fraction": 0.5902912616729736, "avg_line_length": 23.83132553100586, "blob_id": "c295fb77a0a90370e659cac799c9b3e563960014", "content_id": "068d1384ca4d42c661fe6f242bc53a96b0e1be1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2060, "license_type": "no_license", "max_line_length": 100, "num_lines": 83, "path": "/4/1.py", "repo_name": "pkmollman/advent_2020", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport re\n\npassports_text = ''\n\nwith open('input.txt', 'r') as f:\n for line in f.readlines():\n passports_text += line\n\ndef byr(value):\n if len(value) == 4 and int(value) >= 1920 and int(value) <= 2002:\n return True\n return False\ndef iyr(value):\n if len(value) == 4 and int(value) >= 2010 and int(value) <= 2020:\n return True\n return False\ndef eyr(value):\n if len(value) == 4 and int(value) >= 2020 and int(value) <= 2030:\n return True\n return False\ndef hgt(value):\n if 'cm' in value[-2:] and 150 <= int(value[:-2]) <= 193:\n return True\n if 'in' in value[-2:] and 59 <= int(value[:-2]) <= 76:\n return True\n return False\ndef hcl(value):\n return re.match('^#[0-9a-f]{6}$',value)\ndef ecl(value):\n return value in ['amb','blu','brn','gry','grn','hzl','oth']\ndef pid(value):\n # this is trash, shoot me\n return re.match('^[0-9]{9}$',value)\n\nrequired_values = {\n 'byr': byr,\n 'iyr': iyr,\n 'eyr': eyr,\n 'hgt': hgt,\n 'hcl': hcl,\n 'ecl': ecl,\n 'pid': pid,\n}\n\nunsep_passports = passports_text.split('\\n')\npassports = []\npassport_group = []\n\nfor line in unsep_passports:\n if line == '':\n passports.append(passport_group)\n passport_group = []\n elif line == unsep_passports[-1]:\n passport_group.append(line)\n passports.append(passport_group)\n else:\n passport_group.append(line)\n\nclean_passports = []\n\nfor passport in passports:\n clean_passport = {}\n for line in passport:\n for key_value in line.split(' '):\n k,v = key_value.split(':')\n clean_passport[k] = v\n clean_passports.append(clean_passport)\n\nvalid_passports = 0\n\nfor passport in clean_passports:\n valid_passport = True\n for required_value in required_values:\n if required_value in passport and required_values[required_value](passport[required_value]):\n pass\n else:\n valid_passport = False\n if valid_passport:\n valid_passports += 1\n\nprint(valid_passports)" }, { "alpha_fraction": 0.6149377822875977, "alphanum_fraction": 0.6390041708946228, "avg_line_length": 26.409090042114258, "blob_id": "ecfcf4f64340f14bca17023d10e463e75050f4cc", "content_id": "95e34e1a84a5504392cd05952b3142a9e98c5769", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1205, "license_type": "no_license", "max_line_length": 108, "num_lines": 44, "path": "/8/3.py", "repo_name": "pkmollman/advent_2020", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nadapters = []\n\nwith open('input.txt', 'r') as f:\n adapters = [int(line.strip()) for line in f.readlines()]\n\n\nadapters.append(0)\nadapters.sort()\nadapters.append(adapters[-1]+3)\n\ncount_one_gap = 1\ncount_three_gap = 1\n\nglobal g_counter\n# g_counter = 0\n\nmain_list = []\n\ndef options(i, slices):\n return [ num for num 
in slices if (num - i) <= 3 ]\n\ndef options_r(i, adapters, adapter, sliced):\n global g_counter\n for nummy,option in enumerate([ num for num in sliced if (num - adapter) <= 3 ]):\n if option == adapters[-1]:\n g_counter += 1\n option_index = i + nummy + 1\n options_r(option_index,adapters,option,adapters[option_index+1:option_index+4])\n\ndef options_r2(i, adapters, adapter, sliced, g_counter):\n thingy = g_counter\n for nummy,option in enumerate([ num for num in sliced if (num - adapter) <= 3 ]):\n if option == adapters[-1]:\n thingy += 1\n option_index = i + nummy + 1\n thingy += options_r2(option_index,adapters,option,adapters[option_index+1:option_index+4],g_counter)\n return thingy\n\noption_multiplier = 1\n\ng_counter = options_r2(0,adapters,adapters[0],adapters[1:4],0)\nprint(g_counter)" }, { "alpha_fraction": 0.638436496257782, "alphanum_fraction": 0.6416938304901123, "avg_line_length": 26.909090042114258, "blob_id": "96cef93af14cd3ea6bd0972ffd91e3c926d059d8", "content_id": "751a523388b5d6feaea9c2d2a8683c516c8df5f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1228, "license_type": "no_license", "max_line_length": 73, "num_lines": 44, "path": "/6/2.py", "repo_name": "pkmollman/advent_2020", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\ninput_text = ''\n\nwith open('input.txt', 'r') as f:\n input_text = f.read().split('\\n')\n\ntext_groups = []\ntext_group = []\n\n# for single line of answers append to a list,\n# once a blank line is reached, append list to the main list,\n# and reset current working list to empty\nfor line in input_text:\n if line == '':\n text_groups.append(text_group)\n text_group = []\n continue\n text_group.append(line)\n\ngroup_answer_total = 0\n\n# for list of answer strings in text_groups\nfor group in text_groups:\n # track group's unique answers\n letters = []\n for answers in group:\n for letter in answers:\n if letter in letters:\n continue\n letters.append(letter)\n # track all group's answer strings in a single string\n all_answers = ''\n for answers in group:\n all_answers += answers\n # track group's valid answers\n group_total = 0\n # if unique letter count in combined answer string\n # equals number of group members the answer is counted in group_total\n for letter in letters:\n if all_answers.count(letter) == len(group): group_total += 1\n group_answer_total += group_total\n\nprint(group_answer_total)\n" }, { "alpha_fraction": 0.5767441987991333, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 19.5238094329834, "blob_id": "fc89b44565edaedd570dcfceb8feb2237c18dd4d", "content_id": "574c5c19b3492f7e1a90804cd8a550331bcafe66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 430, "license_type": "no_license", "max_line_length": 60, "num_lines": 21, "path": "/8/1.py", "repo_name": "pkmollman/advent_2020", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nadapters = []\n\nwith open('input.txt', 'r') as f:\n adapters = [int(line.strip()) for line in f.readlines()]\n\nadapters.sort()\n\ncount_one_gap = 1\ncount_three_gap = 1\n\nfor i,adapter in enumerate(adapters):\n if i == 0:\n pass\n elif adapter - adapters[i-1] == 1:\n count_one_gap += 1\n elif adapter - adapters[i-1] == 3:\n count_three_gap += 1\n\nprint(count_one_gap * count_three_gap)" }, { "alpha_fraction": 0.5768194198608398, "alphanum_fraction": 0.584007203578949, "avg_line_length": 26.170732498168945, "blob_id": 
"4960e71d5b7e4ddccc1cc0b1a0efeaca40894c3d", "content_id": "a676844c6de9a5134b9614c4cb4fe0061294d02c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1113, "license_type": "no_license", "max_line_length": 78, "num_lines": 41, "path": "/7/1.py", "repo_name": "pkmollman/advent_2020", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport re\n\n# REGEX = r'([\\w\\s]+)bags contain((\\s\\d+)\\s([\\w\\s]+)bags?[\\.,])+'\nmain_bag_reg = r'([\\w\\s]+)bags contain'\ninner_bag_reg = r'((\\s\\d+)\\s([\\w\\s]+) bags?)+'\ninput_text = ''\n\nwith open('input.txt', 'r') as f:\n input_text = [line for line in f.readlines()]\n\nbag_types = {}\n\nfor line in input_text:\n main_result = re.match(main_bag_reg, line)\n inner_result = re.findall(inner_bag_reg, line)\n bag_types[main_result.group(1).strip()] = {}\n for result in inner_result:\n bag_types[main_result.group(1).strip()][result[2].strip()] = result[1]\n\ncontains_shiny_golds = []\n\nfor bag in bag_types:\n # print(bag_types[bag])\n if 'shiny gold' in bag_types[bag]:\n contains_shiny_golds.append(bag)\n\n\nwhile True:\n inner_num = 0\n for bag_type in bag_types:\n for bag in contains_shiny_golds:\n if bag in bag_types[bag_type]:\n if not bag_type in contains_shiny_golds:\n contains_shiny_golds.append(bag_type)\n inner_num += 1\n if inner_num == 0:\n break\n\nprint(len(contains_shiny_golds))" }, { "alpha_fraction": 0.6105263233184814, "alphanum_fraction": 0.6168420910835266, "avg_line_length": 24, "blob_id": "0e0fbd6f149fb7c4139c21d2163da07b484a91b6", "content_id": "f595d3433cd76ff9f2edc8849286c9d342458140", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 950, "license_type": "no_license", "max_line_length": 83, "num_lines": 38, "path": "/7/2.py", "repo_name": "pkmollman/advent_2020", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport re\n\nmain_bag_reg = r'([\\w\\s]+)bags contain'\ninner_bag_reg = r'((\\s\\d+)\\s([\\w\\s]+) bags?)+'\ninput_text = ''\n\nwith open('input.txt', 'r') as f:\n input_text = [line for line in f.readlines()]\n\nbag_types = {}\n\nfor line in input_text:\n main_result = re.match(main_bag_reg, line)\n inner_result = re.findall(inner_bag_reg, line)\n bag_types[main_result.group(1).strip()] = {}\n for result in inner_result:\n bag_types[main_result.group(1).strip()][result[2].strip()] = int(result[1])\n\nshiny_golds = list(bag_types['shiny gold'].keys())\n\nnumber_o_bags = 0\n\ndef num_bag(bag_type, number):\n numbag = number\n if bag_types[bag_type]:\n for bag in bag_types[bag_type]:\n numbag += num_bag(bag, bag_types[bag_type][bag]) * number\n else:\n return number\n return numbag\n\n\nfor bag in shiny_golds:\n number_o_bags += num_bag(bag, bag_types['shiny gold'][bag])\n\nprint(number_o_bags)\n" }, { "alpha_fraction": 0.5201109647750854, "alphanum_fraction": 0.545076310634613, "avg_line_length": 19.05555534362793, "blob_id": "aab43044098768b922e01a1c772fe504196f1437", "content_id": "b883688bd458b788d7b487a1e66a6b83c04213b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 721, "license_type": "no_license", "max_line_length": 55, "num_lines": 36, "path": "/3/1.py", "repo_name": "pkmollman/advent_2020", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nmatrix = []\nlocation = [0,0]\ntrees = 0\nbig_number = 0\nslopes = [\n [3,1],\n]\n\nwith open('input.txt', 'r') as f:\n for line in f.readlines():\n 
matrix.append(line.strip())\n\ndef tree_or_nah(matrix,x,y):\n r_x = (x % len(matrix[0]))\n print(r_x, y)\n print(matrix[y])\n if matrix[y][r_x] == '#':\n return True\n return False\n\nfor slope in slopes:\n while location[1] < len(matrix)-1:\n location[0] += slope[0]\n location[1] += slope[1]\n print(location)\n if tree_or_nah(matrix,location[0],location[1]):\n trees += 1\n if big_number == 0:\n big_number = trees\n else:\n big_number *= trees\n\nprint(trees)\nprint(big_number)" }, { "alpha_fraction": 0.5184404850006104, "alphanum_fraction": 0.5447840094566345, "avg_line_length": 22.75, "blob_id": "9a2c1d67a94ac879979e75194548dcde83db6b9b", "content_id": "09beea555fccf92004aa9cf5d7c9e209f39ab706", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 949, "license_type": "no_license", "max_line_length": 69, "num_lines": 40, "path": "/5/1.py", "repo_name": "pkmollman/advent_2020", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\ndef seat_id(seat):\n\n row_max = 128\n row_min = 0\n row_nums = list(range(row_min,row_max))\n\n for char in seat[:7]:\n if char == 'F':\n row_nums = row_nums[:int((len(row_nums)-1)/2)+1]\n if char == 'B':\n row_nums = row_nums[int((len(row_nums)-1)/2)+1:]\n print(row_nums)\n\n column_max = 8\n column_min = 0\n column_nums = list(range(column_min,column_max))\n\n for char in seat[-3:]:\n if char == 'L':\n column_nums = column_nums[:int((len(column_nums)-1)/2)+1]\n if char == 'R':\n column_nums = column_nums[int((len(column_nums)-1)/2)+1:]\n print(column_nums)\n return row_nums[0] * 8 + column_nums[0]\n\n\nprint(seat_id('BBFFBBFRLL'))\n\nseat_ids = []\n\nwith open('input.txt', 'r') as f:\n for line in f.readlines():\n seat_ids.append(seat_id(line.strip()))\n print(line)\n\nseat_ids.sort()\nprint(seat_ids)\nprint(seat_ids[-1])" }, { "alpha_fraction": 0.6631355881690979, "alphanum_fraction": 0.6652542352676392, "avg_line_length": 24.513513565063477, "blob_id": "d4241fb400559a1299262c675071b5db862fe4c7", "content_id": "89503c3597d547e3d935891f3980a1bfe5e2fe07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 944, "license_type": "no_license", "max_line_length": 73, "num_lines": 37, "path": "/6/1.py", "repo_name": "pkmollman/advent_2020", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\ninput_text = ''\n\nwith open('input.txt', 'r') as f:\n input_text = f.read().split('\\n')\n\n# track each group of answers as a single string\ntext_groups = []\n\n# track current group for below iteration \ntext_group = ''\n\n# for single line of answers append to a single string,\n# once a blank line is reached, append to the main list,\n# and reset current working string to empty\nfor line in input_text:\n if line == '':\n text_groups.append(text_group)\n text_group = ''\n continue\n text_group += line\n\n# total sum of unique answers per group\ngroup_answer_total = 0\n\n# for char in group answer string, if char not in letters list, append it\n# len(letters) is group total unique answers\nfor group in text_groups:\n letters = []\n for letter in group:\n if letter in letters:\n continue\n letters.append(letter)\n group_answer_total += len(letters)\n\nprint(group_answer_total)\n" } ]
14
dustiny5/DS-Unit-3-Sprint-2-SQL-and-Databases
https://github.com/dustiny5/DS-Unit-3-Sprint-2-SQL-and-Databases
29d117ccb75b14ce73f626356062a163f0ad6546
72b6cb822a679705d28066e63e8d70f07a36ff29
3d22bb86be195e286f7337577a206c79f1332557
refs/heads/master
2020-05-30T17:02:27.971317
2019-06-07T17:55:42
2019-06-07T17:55:42
189,862,563
0
0
null
2019-06-02T15:21:50
2019-03-25T17:58:26
2019-06-01T03:05:54
null
[ { "alpha_fraction": 0.6952314972877502, "alphanum_fraction": 0.7069799304008484, "avg_line_length": 67.92857360839844, "blob_id": "e65bfa78e5c3ed26b55bc265974adbd7473769bd", "content_id": "1d890dbe82bb186f4bbfaf9b766db045f8ee269e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2894, "license_type": "permissive", "max_line_length": 312, "num_lines": 42, "path": "/sprint2/northwind.py", "repo_name": "dustiny5/DS-Unit-3-Sprint-2-SQL-and-Databases", "src_encoding": "UTF-8", "text": "import sqlite3\n\n# Connect to northwind_small database\nconn = sqlite3.connect('northwind_small.sqlite3')\n\n# Read through its database to query later on\ncurs = conn.cursor()\n\n# Save db in result_product\nresult_10 = curs.execute('SELECT ProductName, UnitPrice FROM Product ORDER BY UnitPrice DESC LIMIT 10;').fetchall()\nprint('Top 10 expensive items:', result_10)\nprint('###################################')\n\nresult_avg_age = curs.execute('SELECT ROUND(AVG(HireDate - BirthDate), 2) FROM Employee').fetchall()\nprint('Avg age of employees at hiring date:', result_avg_age[0][0])\nprint('###################################')\n\nresult_city = curs.execute('SELECT City, HireDate, AVG(HireDate - BirthDate) FROM Employee GROUP BY City;').fetchall()\nprint('Avg age of employees at hiring date by city:', result_city)\nprint('###################################')\n\n################## PART 3 ########################\n\n# Note to self - when there's a primary key use Join on supplierid DO NOT use p.supplierid = s.supplierid\nresult_10_supplier = curs.execute('SELECT p.ProductName, s.CompanyName, p.UnitPrice FROM Supplier AS s INNER JOIN Product AS p ON SupplierID ORDER BY UnitPrice DESC LIMIT 10;').fetchall()\nprint('Top 10 expensive product and their supplier', result_10_supplier)\nprint('###################################')\n\nresult_large_category = curs.execute('SELECT CategoryID, COUNT(p.ProductName) FROM Category AS c INNER JOIN Product as p ON CategoryID GROUP BY CategoryID ORDER BY COUNT(p.ProductName) DESC LIMIT 1;').fetchall()\nprint('Largest category,', result_large_category[0][0], ', with number of unique products:', result_large_category[0][1])\nprint('###################################')\n\nresult_territory = curs.execute('SELECT EmployeeID, COUNT(TerritoryDescription) FROM EmployeeTerritory INNER JOIN Territory ON TerritoryID GROUP BY EmployeeID ORDER BY COUNT(TerritoryDescription) DESC LIMIT 1;').fetchall()\n\nprint('EmployeeID:,', result_territory[0][0], ', with the most territory:', result_territory[0][1])\n########### PART 4 ############\n\n# The type of relationship between employee and territory tables is none. However once you join the table employee territory to either emplyoee or territory then you will get a one to one relationship. You can join a second time on the primary key, EmployeeID.\n\n# Mongo DB is an object based documentation that handles and stores big data. It creates a dictionary where you're given a unique key and its value is a json dictionary(strings are keys and anything type for values) It is not good for querying data compared to sqlite or other querying languages like postgresql.\n\n# NewSQL tries to blend traditional query language to NOSQL languages. Traditional languages follow the ACID principles and NOSQL is lenient on the A,I, and D principle(the C, consistent, is kept). Basically, try to use traditional query langauages on big data." 
}, { "alpha_fraction": 0.6915380358695984, "alphanum_fraction": 0.697255551815033, "avg_line_length": 31.70093536376953, "blob_id": "9c563c3afce6780d82752778b8c00624f77004e0", "content_id": "4a3666d1a54f4c72ba47d30476df6ea401d59bbb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3498, "license_type": "permissive", "max_line_length": 85, "num_lines": 107, "path": "/module2-sql-for-analysis/lecture.py", "repo_name": "dustiny5/DS-Unit-3-Sprint-2-SQL-and-Databases", "src_encoding": "UTF-8", "text": "# Local install. Go to the desired dir and local install\n## pipenv install psycopg2-binary\n\n# Activate\n## pipenv shell\n\nimport psycopg2\n\n\n# From help(psycopg2.connect) - Enter the bottom 4.\ndbname = 'ximkxdxb'\nuser = 'ximkxdxb'\npassword = '' # Don't commit this. delete when you commit/push\nhost = 'raja.db.elephantsql.com'\n\n# Connect to Elephant Database\npg_conn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host)\n\n# Set-up a cursor to read the elephant database\npg_curs = pg_conn.cursor()\n\n# Query and fetchall info in elephant database\npg_curs.execute('SELECT * FROM test_table;')\nprint(pg_curs.fetchall())\n\n# ETL - Extract, Transform, Load Pipeline\nimport sqlite3\n\n# Use sqlite3 and retrieve the RPG database\nsl_conn = sqlite3.connect('../module1-introduction-to-sql/rpg_db.sqlite3')\nprint(sl_conn) # Test to see imported library is retrieved\n\n# Setup a sqlite3 cursor to read the database\nsl_curs = sl_conn.cursor()\n\n# Query and fetchall() from rpg database\nprint(sl_curs.execute('SELECT COUNT(*) FROM charactercreator_character;').fetchall())\n\n# From the rpg database, get all the columns from the table (charcre_char)\n# Extract from sqlite3 rpg database\ncharacters = sl_curs.execute('SELECT * FROM charactercreator_character;').fetchall()\nprint(len(characters)) # Same as COUNT()\n\n# Look at the first characters and last characters\nprint('First line:', characters[0], 'Last line:', characters[-1])\n\n# CREATE TABLE to load to PostgreSQL - This will be our character schema\n# Pre- Load\n# Sets up table to be loaded to postgreSQL database\ncreate_character_table = '''\n CREATE TABLE charactercreator_character (\n character_id SERIAL PRIMARY KEY,\n name VARCHAR(30),\n level INT,\n exp INT,\n hp INT,\n strength INT,\n intelligence INT,\n dexterity INT,\n wisdom INT\n );\n'''\n############################################################\n# Read through the table we created above\n#print(pg_curs.execute(create_character_table))\n\n# Define INSERT INTO \nattempted_insert = '''\n INSERT INTO charactercreator_character\n VALUES ''' + str(characters[0])\nprint(attempted_insert)\n\n# INSERT INTO created table\n#print(pg_curs.execute(attempted_insert))\n\n# Changing Databases - CREATE or INSERT a table. 
You have to commit to make it work\npg_conn.commit() \n###############################################################\n# Can't create duplicate so comment out the above ####### to #########\n\n# After commit - Query the newly created and insert info\npg_curs.execute('SELECT * FROM charactercreator_character;')\npg_characters = pg_curs.fetchall()\nprint(pg_curs.fetchall())\n\n# Only INSERT INTO 1 row\nprint('Row:',pg_characters[0], 'Row Length:', len(pg_characters))\n\n# INSERT INTO the rest of the rows to postgresql database\n# for character in characters[1:]:\n# insert_character = '''\n# INSERT INTO charactercreator_character\n# VALUES ''' + str(character)\n# pg_curs.execute(insert_character)\npg_conn.commit() # Can't duplicate so comment out the for loop\n\n# Restart the cursor\npg_curs = pg_conn.cursor()\n# Query the finished table from loaded to postgresql database\npg_curs.execute('SELECT * FROM charactercreator_character')\n# Fetch all table\npg_characters = pg_curs.fetchall()\nprint('Row:', pg_characters, 'Row Length:', len(pg_characters))\n\n# Test our RPG database to our PostgreSQL database - No Error\nfor character, pg_character in zip(characters, pg_characters):\n assert character == pg_character" }, { "alpha_fraction": 0.7256371974945068, "alphanum_fraction": 0.7466266751289368, "avg_line_length": 32.400001525878906, "blob_id": "b888f040374b3a2345d2325107e77130baccc761", "content_id": "9af415d603e274053b873c4255960ca1f23df2b0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 667, "license_type": "permissive", "max_line_length": 194, "num_lines": 20, "path": "/module1-introduction-to-sql/query_part2.py", "repo_name": "dustiny5/DS-Unit-3-Sprint-2-SQL-and-Databases", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport sqlite3\nfrom sqlalchemy import create_engine\n\n# Engine use for sql query\nengine = create_engine('sqlite://', echo=False)\n\n# DataFrame\ndf = pd.read_csv('buddymove_holidayiq.csv')\n\n# Create a new Database\nconn = sqlite3.connect('buddymove_holiday.sqlite3')\n\n# Use df as sql\ndf.to_sql('buddymove_holiday', con=engine)\n\n# Use the engine to execute query\nprint('The count is:', engine.execute('SELECT COUNT(*) FROM buddymove_holiday').fetchone()[0])\n\nprint('The number of users who reviewed at least 100 Nature and Shopping category:', engine.execute('SELECT COUNT(*) FROM buddymove_holiday WHERE Nature>=100 AND Shopping >= 100').fetchone()[0])" }, { "alpha_fraction": 0.6687822341918945, "alphanum_fraction": 0.6751289367675781, "avg_line_length": 23.25, "blob_id": "76deeef3c883d1255db0c97de36a627edf17346d", "content_id": "89ea5d30114b53b5ea21b05173c258e4c276467b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2521, "license_type": "permissive", "max_line_length": 82, "num_lines": 104, "path": "/module2-sql-for-analysis/insert_titanic.py", "repo_name": "dustiny5/DS-Unit-3-Sprint-2-SQL-and-Databases", "src_encoding": "UTF-8", "text": "import psycopg2\n\n# Re Enter\ndbname = 'eczastgf'\nuser = 'eczastgf'\npassword = ''\nhost = 'raja.db.elephantsql.com'\n\n# Connect to ElephantSQL\npg_conn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host)\n\n# Read the ElephantSQL database\npg_curs = pg_conn.cursor()\n\n# Df to titanic database\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\n# Save df\ndf = pd.read_csv('titanic.csv')\n\n# Replace ' with an empty space\ndf['Name'] = 
df['Name'].str.replace('\\'', ' ')\n\n# Create engine - Used to convert a dataframe to database\nengine = create_engine('sqlite://', echo=False)\n\n# Create the new table on sql db\ndf.to_sql('titanic', con=engine)\n\n# Titanic df is now a db - Is a list of tuples\ntitanic_db = engine.execute('SELECT * FROM titanic;').fetchall()\n\n# Create an empty db for ElephantSQL\ncreate_titanic_table = '''\n CREATE TABLE titanic (\n id integer NOT NULL PRIMARY KEY,\n survived INT,\n pclass INT,\n name VARCHAR(100),\n sex VARCHAR(10),\n age INT,\n siblings_spouses_aboard INT,\n parents_child_aboard INT,\n fare INT\n );\n'''\n\n# Read thourgh the table we created\npg_curs.execute(create_titanic_table)\n\n# INSERT INTO the rest of the rows to postgresql database\nfor row in titanic_db:\n insert_row = '''\n INSERT INTO titanic\n VALUES ''' + str(row)\n pg_curs.execute(insert_row)\n\n# Commit to Elephantsql and test from browser: SELECT * FROM titanic\npg_conn.commit()\n\n# Create a local titanic db then upload to elephant db\n\"\"\"\nimport sqlite3\n\n# Reset index to get an index,id\ndf = df.reset_index().rename(columns={'index':'id'})\n\n# Save a create a titanic database\nsl_conn = sqlite3.connect('titanic.sqlite3')\n\n# CREATE TYPE for titanic sex\n# CREATE TABLE for titanic database\ncreate_titanic_table = '''\n CREATE TABLE titanic (\n id integer NOT NULL PRIMARY KEY AUTOINCREMENT,\n survived INT,\n pclass INT,\n name VARCHAR(100),\n sex VARCHAR(10),\n age INT,\n siblings_spouses_aboard INT,\n parents_child_aboard INT,\n fare INT\n );\n'''\n\n# Read the database\nsl_curs = sl_conn.cursor()\n\n# Create the table for the titanic database\n#sl_curs.execute(create_titanic_table)\n## Comment out since already created\n\n# Define INSERT INTO\ninsert_titanic_db = '''\n INSERT INTO titanic\n VALUES ''' + str(tuple(df.loc[0, :].tolist()))\n\nsl_curs.execute(insert_titanic_db)\n\n# Fetch all data in titanic database\nprint(sl_curs.execute('SELECT * FROM titanic;').fetchall())\n\"\"\"" }, { "alpha_fraction": 0.6910856366157532, "alphanum_fraction": 0.7007943391799927, "avg_line_length": 25.057470321655273, "blob_id": "c64d7fffdf5c223f6b7ba61d9e59ee4d32d1fbe6", "content_id": "453d8717e4f6a9efeda041cc92da3be52117847e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2266, "license_type": "permissive", "max_line_length": 84, "num_lines": 87, "path": "/sprint_practice/sprint_practice.py", "repo_name": "dustiny5/DS-Unit-3-Sprint-2-SQL-and-Databases", "src_encoding": "UTF-8", "text": "# From the cmd prompt set-up a virtual enviornment \n# `pipenv install ...` is the dir\n\n# Run `pipenv shell` to activate\n\nimport sqlite3\n\n# Connect to a database and if there's is none it will create one\nconn = sqlite3.connect('rpg_db.sqlite3')\n\n# Used to make queries\ncurs = conn.cursor()\n\n# Execute query and fetch all the data\nresult = curs.execute('SELECT * FROM armory_item;')\nresult.fetchall()\n# print(result.fetchall())\n\n############### Create a new table ###############\n\n# 'toy_db_sqlite3' doesn't exist so it will create one\nconn = sqlite3.connect('toy_db_sqlite3')\n\n# Create a table called toy\ncreate_toy_table = '''CREATE TABLE toy (\n toy_id integer NOT NULL PRIMARY KEY AUTOINCREMENT,\n toy_name VARCHAR(30),\n price NUMERIC,\n small_parts integer\n)\n'''\n\n# Create a cursor to make queries\ncurs = conn.cursor()\n\n# Execute `create_toy_table` to create the table\n## Is empty right now since we didn't populate 
it\n#curs.execute(create_toy_table)\n## Comment out above line since we can't create duplicates\n# print(curs.execute(create_toy_table).fetchall())\n\n# INSERT INTO toy VALUES to create new values of fields/col\n\ninsert = \"INSERT INTO toy VALUES (1, 'Legos', 10.5, 1), (2, 'Train', 17.6, 0);\"\n\n# Execute to insert new values to toy table\ncurs.execute(insert)\n\n# Fetch all from toy database - toy_db is a list\ntoy_db = curs.execute('SELECT * FROM toy;').fetchall()\n\n######### Try to launch it to elephantsql #####\n\nimport psycopg2\n\n# Enter/Re enter details from elephantsql\n\ndbname = 'oxwfzsio'\nuser = 'oxwfzsio'\npassword = ''\nhost = 'raja.db.elephantsql.com'\n\n# connect to elephantsql\npg_conn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host)\n\n# Used to read the database on elephantsql\npg_curs = pg_conn.cursor()\n\n# Create an empty table for ElephantSQL\ncreate_toy_table_esql = '''CREATE TABLE toy (\n toy_id integer NOT NULL PRIMARY KEY,\n toy_name VARCHAR(30),\n price NUMERIC,\n small_parts integer\n)\n'''\n# Execute to create an empty table for ESQL\npg_curs.execute(create_toy_table_esql)\n\n# # Execute previous created toy database\nfor row in toy_db:\n insert_row = '''\n INSERT INTO toy VALUES ''' + str(row)\n pg_curs.execute(insert_row)\n\n# Commit to see the changes on elephantsql\npg_conn.commit()" }, { "alpha_fraction": 0.5940678119659424, "alphanum_fraction": 0.6101694703102112, "avg_line_length": 28.524999618530273, "blob_id": "3e72a53cd7649f40f0078800f2ff6ad8cd560b83", "content_id": "f85a24fb589934eb83b54307513a6172640e6eb3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1180, "license_type": "permissive", "max_line_length": 93, "num_lines": 40, "path": "/sprint2/demo_data.py", "repo_name": "dustiny5/DS-Unit-3-Sprint-2-SQL-and-Databases", "src_encoding": "UTF-8", "text": "import sqlite3\n\n# Connect to a database\nconn = sqlite3.connect('demo_data.sqlite3')\n\n# Make a cursor to read the database\ncurs = conn.cursor()\n\n# Create a table\ncreate_demo_data = '''\nCREATE TABLE demo (\n s CHAR(1),\n x INT,\n y INT\n)\n'''\n\n# Execute to create demo data\ncurs.execute(create_demo_data)\n\n# INSERT INTO demo VALUES to create new values of columns\ninsert = \"INSERT INTO demo VALUES ('g', 3, 9), ('v', 5, 7),('f', 8, 7);\"\n\n# Execute to insert into demo table\ncurs.execute(insert)\n\n# Query - Count how many rows\nresult_count = curs.execute('SELECT COUNT(*) FROM demo;').fetchone()\nprint('There are:', result_count[0], 'rows')\nprint('###################################')\n\n# Query - How many rows where both x and y are >= 5\nresult_count_5 = curs.execute('SELECT COUNT(*) FROM demo WHERE x >=5 AND y >= 5;').fetchone()\nprint('There are:', result_count_5[0], 'rows of x and y >= 5')\nprint('###################################')\n\n# Query - How many unique values of y are there?\nresult_unique_y = curs.execute('SELECT COUNT(DISTINCT y) FROM demo;').fetchone()\nprint('There are:', result_unique_y[0], 'unique values of y')\nprint('###################################')" }, { "alpha_fraction": 0.6904506683349609, "alphanum_fraction": 0.707617998123169, "avg_line_length": 19.955055236816406, "blob_id": "33bbe1309e1a11cd2bab107a330678bca4b75216", "content_id": "03fcf221e9551222254134959bd6592c6797cd5c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1864, "license_type": "permissive", "max_line_length": 79, 
"num_lines": 89, "path": "/module1-introduction-to-sql/lecture.py", "repo_name": "dustiny5/DS-Unit-3-Sprint-2-SQL-and-Databases", "src_encoding": "UTF-8", "text": "import sqlite3\n\n# Database\nconn = sqlite3.connect('rpg_db.sqlite3')\n\nconn\n# Goes through the database - can have multiple\ncurs = conn.cursor()\n\ncurs\n\n# Make a query\nquery = 'SELECT COUNT(*) FROM armory_item;'\n\n# Save results of query\nresult = curs.execute(query)\nresult\n\n# Fetch all results from the query\nresult.fetchall()\n\n\n# SELECT * selects all\nquery = 'SELECT * FROM armory_item;'\nresult = curs.execute(query)\nresult.fetchall()\n\n# Fetch one result from the query\nresult = curs.execute(query)\n# Once fetched then it'll won't be in there.\nresult.fetchone()\n\n############## DB Browser for SQLite ###########\n'''\n# Select all columns from the armory_item table\nSELECT * FROM armory_item\n\n# Select the table where item_id is 167, 45, 27\nWHERE item_id IN(167, 45, 27);\n\n\n'''\n\n############## Create a new table ###############\n\nimport sqlite3\nconn = sqlite3.connect('toy_db_sqlite3')\n\n# Create a table, toy_id, \ncreate_statement = '''CREATE TALBE toy (\n toy_id integer NOT NULL PRIMARY KEY AUTOINCREMENT,\n toy_name varchar(30),\n price numeric,\n small_parts integer)'''\n\n# \ncurs = conn.cursor()\n\n# Create table\ncurs.execute(create_statement)\n\n# Create new values for the table\ninsert = \"INSERT INTO toy VALUES (1, 'Legos', 10.5, 1), (2, 'Train', 17.6, 0);\"\n# Exevute query\ncurs.execute(insert)\n\n\n# Explicit Inner Join#\n'''\nSELECT \nname, mana\nFROM charactercreator_mage\nINNER JOIN charactercreator_character\nON charactercreator_mage.character_ptr_id = \ncharactercreator_character.character_id;\n'''\n# Implicit Inner Join #\n'''\nSELECT name, mana\nFROM charactercreator_mage, charactercreator_character\nWHERE charactercreator_character.character_id =\ncharactercreator_mage.character_ptr_id;\n'''\n# Alias by using AS #\n'''\nSELECT cc.name, cm.mana\nFROM charactercreator_mage AS cm, charactercreator_character AS cc\nWHERE cc.character_id = cm.character_ptr_id;\n'''" } ]
7
nmamie/Flask-Webapplication
https://github.com/nmamie/Flask-Webapplication
57594701a5c8d8dfe348c1bb202694d40696ab88
b243d0ea1983f0768615ce9d12786d302c1eda26
c2df0f7aa2db749b5a3b237d1c00a47ce38a3c26
refs/heads/main
2023-03-19T01:21:46.761850
2021-03-13T17:39:24
2021-03-13T17:39:24
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5322425365447998, "alphanum_fraction": 0.5466794967651367, "avg_line_length": 31.484375, "blob_id": "0b028607eb009647fc6a4148b99127e681f50137", "content_id": "691f468cd06bd0269af63974214e869fea532e23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 2078, "license_type": "no_license", "max_line_length": 312, "num_lines": 64, "path": "/templates/about.html", "repo_name": "nmamie/Flask-Webapplication", "src_encoding": "UTF-8", "text": "{% extends \"layout.html\" %}\n\n{% block title %}\n About\n{% endblock %}\n\n{% block main %}\n\n<!-- dark mode -->\n{% if not dbase %}\n <h4>You have no users in database.</h4>\n{% endif %}\n{% if dbase %}\n {% for dbase_item in dbase %}\n\n {% if not dbase_item.mode %}\n <script>\n window.onload = function() {\n myFunction();\n };\n </script>\n\n {% endif %}\n\n {% endfor %}\n{% endif %}\n\n<!-- OMEGA Description -->\n <section class=\"container\" style=\"max-width:600px\">\n <h1 class=\"center\">OMEGA Rackets</h1>\n <p class=\"center\"><i>We strive for an internationally connected Racketlon community</i></p>\n <p class=\"justify\">The team behind OMEGA Rackets consists of the brothers Noah Mamié and Léon Mamié. Originating from the difficult time period during the COVID-19 pandemic in 2020, the idea behind this initiative is to promote Racketlon on an international scale and connect the members of this community.</p>\n </section>\n\n<!-- The Team Section -->\n <div class=\"row\">\n <div class=\"col-sm-6\">\n <p class=\"text-center\"><strong>Noah Mamié</strong></p><br>\n <a href=\"#demo\" data-toggle=\"collapse\">\n <img src=\"/static/Noah2.jpg\" class=\"img-circle person\" alt=\"Random Name\">\n </a>\n <div id=\"demo\" class=\"collapse\">\n <p>Co-Founder</p>\n <p>HSG Student and Racketlon enthusiast</p>\n <p>NLA player at Go For 4</p>\n <p>CH Ranking #15</p>\n <p>Member since 2020</p>\n </div>\n </div>\n <div class=\"col-sm-6\">\n <p class=\"text-center\"><strong>Léon Mamie</strong></p><br>\n <a href=\"#demo\" data-toggle=\"collapse\">\n <img src=\"/static/Leon.jpg\" class=\"img-circle person\" alt=\"Random Name\">\n </a>\n <div id=\"demo\" class=\"collapse\">\n <p>Co-Founder</p>\n <p>ETH Student and Racketlon enthusiast</p>\n <p>NLA player at Go For 4</p>\n <p>CH Ranking #29</p>\n <p>Member since 2020</p>\n </div>\n </div>\n </div>\n{% endblock %}" }, { "alpha_fraction": 0.5973207354545593, "alphanum_fraction": 0.6051133871078491, "avg_line_length": 31.448863983154297, "blob_id": "7e2c5882b474d81350962dde2b0150a44bc980c4", "content_id": "f312533297a31c2aef55096f8130f2808d610834", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11421, "license_type": "no_license", "max_line_length": 219, "num_lines": 352, "path": "/application.py", "repo_name": "nmamie/Flask-Webapplication", "src_encoding": "UTF-8", "text": "import os\nimport os.path\nfrom os import path\nimport camelot\nimport csv\nimport requests\n\nfrom cs50 import SQL\nfrom flask import Flask, flash, jsonify, redirect, render_template, request, session\nfrom flask_session import Session\nfrom tempfile import mkdtemp\nfrom werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError\nfrom werkzeug.security import check_password_hash, generate_password_hash\n\nfrom helpers import apology, login_required\n\n# configure application\napp = Flask(__name__)\n\n# ensuring templates are auto-reloaded\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\n\n# 
ensuring responses aren't cached\[email protected]_request\ndef after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response\n\n# configure session to use filesystem (instead of signed cookies)\napp.config[\"SESSION_FILE_DIR\"] = mkdtemp()\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\n# configure lib to use slqite3 database\ndb = SQL(\"sqlite:///sports.db\")\n\n# configure web application routes\[email protected](\"/\")\n@login_required\ndef index():\n \"show sports homepage\"\n\n dbase = db.execute(\"SELECT * FROM users WHERE id = :id\", id=session[\"user_id\"])\n\n return render_template(\"index.html\", dbase=dbase)\n\[email protected](\"/analytics\")\n@login_required\ndef analytics():\n \"show player analytics\"\n\n dbase = db.execute(\"SELECT * FROM users WHERE id = :id\", id=session[\"user_id\"])\n user = dbase[0][\"id\"]\n name = dbase[0][\"name\"]\n\n pbase = db.execute(\"SELECT * FROM players\")\n\n if len(pbase) == 0:\n # read csv into database\n with open(\"ranking.csv\", \"r\") as ranking:\n reader = csv.DictReader(ranking, delimiter=\",\")\n\n for row in reader:\n\n players = []\n\n players.append(row[\"Rackets\"])\n players.append(row[\"Rank\"])\n players.append(row[\"Name\"])\n players.append(row[\"Prev\"])\n players.append(row[\"ID\"])\n players.append(row[\"Points\"])\n\n db.execute(\"INSERT INTO players (Rackets, Rank, Name, Surname, ID, Points) VALUES(?, ?, ?, ?, ?, ?)\", players[0], players[1], players[2], players[3], players[4], players[5])\n\n db_player = db.execute(\"SELECT * FROM history WHERE user_id = :id\", id = user)\n if len(db_player) == 0:\n month = 0\n for i in range(12):\n month += 1\n if path.exists(\"ranking\" + str(month) + \".csv\") == True:\n with open(\"ranking\" + str(month) + \".csv\", \"r\") as ranking:\n reader = csv.DictReader(ranking, delimiter=\",\")\n\n for row in reader:\n\n if row[\"Name\"] == name:\n history = []\n\n history.append(row[\"Rackets\"])\n history.append(row[\"Rank\"])\n history.append(row[\"Name\"])\n history.append(row[\"ID\"])\n history.append(row[\"Points\"])\n\n db.execute(\"INSERT INTO history (user_id, month, rackets, rank, name, player_id, points) VALUES(?, ?, ?, ?, ?, ?, ?)\", user, month, history[0], history[1], history[2], history[3], history[4])\n db_player = db.execute(\"SELECT * FROM history WHERE user_id = :id\", id = user)\n else:\n print(path.exists(\"ranking\" + str(month) + \".csv\"))\n\n year = []\n month = []\n rank = []\n for i in range(len(db_player)):\n y = 2020\n m = db_player[i][\"month\"]\n year.append(y)\n month.append(m)\n rank.append(db_player[i][\"rank\"])\n m += 1\n\n return render_template(\"analytics.html\", dbase=dbase, pbase=pbase, year=year, month=month, rank=rank)\n\[email protected](\"/tour\")\n@login_required\ndef tour():\n \"show info about tour\"\n\n dbase = db.execute(\"SELECT * FROM users WHERE id = :id\", id=session[\"user_id\"])\n\n\n return render_template(\"tour.html\", dbase=dbase)\n\[email protected](\"/history\")\n@login_required\ndef history():\n \"show info about history\"\n\n dbase = db.execute(\"SELECT * FROM users WHERE id = :id\", id=session[\"user_id\"])\n\n return render_template(\"history.html\", dbase=dbase)\n\[email protected](\"/about\")\n@login_required\ndef about():\n \"show info about team\"\n\n dbase = db.execute(\"SELECT * FROM users WHERE id = :id\", 
id=session[\"user_id\"])\n\n return render_template(\"about.html\", dbase=dbase)\n\n\[email protected](\"/profile\")\n@login_required\ndef profile():\n \"\"\"Show user profile and options\"\"\"\n\n dbase = db.execute(\"SELECT * FROM users WHERE id = :id\", id=session[\"user_id\"])\n\n # player rating\n dbase = db.execute(\"SELECT * FROM users WHERE id = :id\", id=session[\"user_id\"])\n name = dbase[0][\"name\"]\n player = db.execute(\"SELECT * FROM players WHERE Name = :name\", name = name)\n\n return render_template(\"profile.html\", dbase=dbase, player=player)\n\n\[email protected](\"/password\", methods=[\"GET\", \"POST\"])\n@login_required\ndef password():\n \"\"\"Change password\"\"\"\n dbase = db.execute(\"SELECT * FROM users WHERE id = :id\", id=session[\"user_id\"])\n\n # User reached route via GET\n if request.method == \"GET\":\n return render_template(\"password.html\", dbase=dbase)\n # User reached route via POST (as by submitting a form via POST)\n else:\n\n # Ensure old password was submitted\n if not request.form.get(\"old\"):\n return apology(\"must provide old password\", 403)\n\n # Ensure new password was submitted\n if not request.form.get(\"new\"):\n return apology(\"must provide new password\", 403)\n\n # Ensure confirmation was submitted\n elif not request.form.get(\"confirmation\"):\n return apology(\"must provide confirmation\", 403)\n\n # Ensure passwords match\n elif request.form.get(\"new\") != request.form.get(\"confirmation\"):\n return apology(\"passwords do not match\", 403)\n\n # Check user database for password hash\n user_db = db.execute(\"SELECT * FROM users WHERE id = :id\", id=session[\"user_id\"])\n for i in range(len(user_db)):\n pw_hash = user_db[i][\"hash\"]\n\n old = request.form.get(\"old\")\n new = request.form.get(\"new\")\n new_hash = generate_password_hash(request.form.get(\"new\"))\n\n # Ensure username exists and password is correct\n if not check_password_hash(pw_hash, old):\n return apology(\"old password is incorrect\", 403)\n\n # ensure new password is not the same as old password\n elif old == new:\n return apology(\"new password is same as old\", 403)\n\n else:\n # Update the password for this user\n db.execute(\"UPDATE users SET hash = :hash WHERE id = :id\", hash = new_hash, id = session[\"user_id\"])\n\n # Redirect user back to profile\n return redirect(\"/profile\")\n\[email protected](\"/success\")\n@login_required\ndef success():\n \"\"\"Successful dark/light mode\"\"\"\n\n dbase = db.execute(\"SELECT * FROM users WHERE id = :id\", id=session[\"user_id\"])\n dmode = dbase[0][\"mode\"]\n\n if dmode == 1:\n # Update the mode for this user\n db.execute(\"UPDATE users SET mode = :mode WHERE id = :id\", mode = 0, id = session[\"user_id\"])\n dmode = 0\n else:\n # Update the mode for this user\n db.execute(\"UPDATE users SET mode = :mode WHERE id = :id\", mode = 1, id = session[\"user_id\"])\n dmode = 1\n\n return render_template(\"success.html\", dbase=dbase)\n\n\n\[email protected](\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n \"\"\"Log user in\"\"\"\n\n # Forget any user_id\n session.clear()\n\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n # Ensure username was submitted\n if not request.form.get(\"username\"):\n return apology(\"must provide username\", 403)\n\n # Ensure password was submitted\n elif not request.form.get(\"password\"):\n return apology(\"must provide password\", 403)\n\n # Query database for username\n rows = db.execute(\"SELECT * FROM users WHERE 
username = :username\",\n username=request.form.get(\"username\"))\n\n # Ensure username exists and password is correct\n if len(rows) != 1 or not check_password_hash(rows[0][\"hash\"], request.form.get(\"password\")):\n return apology(\"invalid username and/or password\", 403)\n\n # Remember which user has logged in\n session[\"user_id\"] = rows[0][\"id\"]\n\n # Redirect user to home page\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"login.html\")\n\n\[email protected](\"/logout\")\ndef logout():\n \"\"\"Log user out\"\"\"\n\n # Forget any user_id\n session.clear()\n\n # Redirect user to login form\n return redirect(\"/\")\n\n\[email protected](\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n \"\"\"Register user\"\"\"\n\n # User reached route via GET\n if request.method == \"GET\":\n return render_template(\"register.html\")\n\n # User reached route via POST (as by submitting a form via POST)\n else:\n\n # Ensure username was submitted\n if not request.form.get(\"first\"):\n return apology(\"must provide first name\", 403)\n\n # Ensure username was submitted\n if not request.form.get(\"last\"):\n return apology(\"must provide last name\", 403)\n\n # Ensure username was submitted\n if not request.form.get(\"username\"):\n return apology(\"must provide username\", 403)\n\n # Ensure password was submitted\n elif not request.form.get(\"password\"):\n return apology(\"must provide password\", 403)\n\n # Ensure confirmation was submitted\n elif not request.form.get(\"confirmation\"):\n return apology(\"must provide password\", 403)\n\n # Ensure passwords match\n elif request.form.get(\"password\") != request.form.get(\"confirmation\"):\n return apology(\"passwords do not match\", 403)\n\n hash = generate_password_hash(request.form.get(\"password\"))\n\n # Check user database for username\n user_db = db.execute(\"SELECT * FROM users WHERE username = :username\", username=request.form.get(\"username\"))\n if len(user_db) == 1:\n\n username = user_db[0][\"username\"]\n\n # Ensure the username is not in the database already\n if request.form.get(\"username\") == username:\n return apology(\"username is already registered\", 403)\n\n else:\n # Add new user to database if not existing yet\n name = request.form.get(\"last\") + \"\\n\" + request.form.get(\"first\")\n mode = 1\n user = db.execute(\"INSERT INTO users (username, hash, name, mode) VALUES (:username, :hash, :name, :mode)\", username=request.form.get(\"username\"), hash=hash, name=name, mode=mode)\n\n # remember which user has logged in\n session[\"user_id\"] = user\n\n\n # Redirect user to home page\n return redirect(\"/\")\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\n# listen for errors\nfor code in default_exceptions:\n app.errorhandler(code)(errorhandler)" }, { "alpha_fraction": 0.7740113139152527, "alphanum_fraction": 0.7814265489578247, "avg_line_length": 42.56922912597656, "blob_id": "8ed0c276d1d77212530d900dac57014308d0d21e", "content_id": "570fe632e34afc3ed67f575e0685e009f6ec980d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2833, "license_type": "no_license", "max_line_length": 200, "num_lines": 65, "path": "/README.md", "repo_name": "nmamie/Flask-Webapplication", "src_encoding": "UTF-8", "text": "# CS50x: Final Project - Flask Web App #\n\n<br />\n\n<p 
align=\"center\">\nNoah Mamié <br />\nLéon Mamié <br />\n</p>\n\n<p align=\"center\">\n<b>CS50x: Computer Science and the Art of Programming</b> <br />\nFall 2020 <br />\nDecember 30, 2020\n</p>\n\n<p align=\"center\">\n<b>Professor:</b> David J. Malan <br />\n<b>Team:</b> OMEGA Rackets <br />\nPython, HTML/CSS/JS\n</p>\n<br />\n\n\n## Introduction\n\nThe vision of OMEGA Rackets is to promote the beautiful sport of Racketlon around the globe and connect enthusiasts to make our community even greater.\nAs racket sports enthusiasts, the brothers Léon and Noah Mamié have decided to team up and create a webpage featuring Racketlon-related content only.\nWe provide players with analytics on their athlete profile, an outlook on the FIR World Tour, the history of this sport and, ultimately, a presentation of our team.\n\n## Analytics\n\nThe analytics section features a personalized ranking history in the form of a line chart.\nThis graphic aims at providing Racketlon players with an idea of their personal development over the last months.\nFurthermore, this page contains a ranking table consisting of the top Racketlon players for demonstration.\n\n## Tour\n\nThe tour section aims at providing members of our community with an overview of the FIR Racketlon Tour, allowing them to make up their minds on which tournaments they wish to participate at.\nUltimately, we hope to spread awareness of this beautiful sport with initiatives like these, resulting in an increasing Racketlon community that is connected all around the globe.\n\n## History\n\nThe tour section walks Racketlon enthusiasts through the most important steps in the past of this still young sport.\nStarting out in the 1980s, Racketlon has seen a tremendous development over the years and has recently sparked some interest with professional athletes from its individual sports.\nThereby, names such as Alison Waters, Nick Matthew and Pongfinity member Otto Tennilä have graced this sport with their presence.\n\n## About\nThe about section introduces the team behind OMEGA rackets and provides information on the individual members' backgrounds.\n\n## More Features\nAdditionally, the the website has been equipped with some trending features to enhance the user experience even further.\nSpecifically, apart from gaining insights into their personal ranking history, registered users may switch between dark and light mode (default: dark) and change their password in the profile section.\nLast but not least, the profile section also contains specific player stats that are tailored to the individual users.\n\n\nOverall, we feel that we have learned a lot from this experience and hope to be able to transfer some of the knowledge gained to future projects.\n\n\n## Software\n\n1. Python: Flask, Camelot (for reading tables from PDFs), Requests\n\n2. HTML, CSS and JavaScript\n\n3. Leaflet (open maps)\n" } ]
3
cansarigol/django-daterangefilter
https://github.com/cansarigol/django-daterangefilter
77fd49bebaeec51100a3b0e09f95015591de9ee7
41f0cc606f6623716335f0d60fa18bae07282f66
fa6fdb16dcaf29140c4ec6d24a60481df44f1696
refs/heads/master
2022-12-09T10:42:17.359999
2020-08-30T13:05:55
2020-08-30T13:05:55
291,471,706
0
0
null
2020-08-30T13:04:11
2020-07-28T15:23:52
2019-01-21T12:36:13
null
[ { "alpha_fraction": 0.8550724387168884, "alphanum_fraction": 0.8550724387168884, "avg_line_length": 68, "blob_id": "5db45bff9b10124cd49e6027eddc3a7be2a87c41", "content_id": "ec3c6342a08437cbb6a27d8b3a3d4dca937dec70", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 69, "license_type": "permissive", "max_line_length": 68, "num_lines": 1, "path": "/daterangefilter/__init__.py", "repo_name": "cansarigol/django-daterangefilter", "src_encoding": "UTF-8", "text": "default_app_config = 'daterangefilter.apps.DateRangeFilterAppConfig'\n" }, { "alpha_fraction": 0.7573601007461548, "alphanum_fraction": 0.7715949416160583, "avg_line_length": 47.296875, "blob_id": "bcff5bd088f64aca105d23abb19a1e47654f5237", "content_id": "6891159a839f57e4fc0c5f202568f3480abd2af9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3091, "license_type": "permissive", "max_line_length": 443, "num_lines": 64, "path": "/README.md", "repo_name": "cansarigol/django-daterangefilter", "src_encoding": "UTF-8", "text": "# Django admin date range filter\n\n[![Build Status](https://travis-ci.org/andreynovikov/django-daterangefilter.svg?branch=master)](https://travis-ci.org/andreynovikov/django-daterangefilter)\n[![GitHub release](https://img.shields.io/github/release/andreynovikov/django-daterangefilter.svg)](https://github.com/andreynovikov/django-daterangefilter/releases/latest)\n[![PyPI release](https://img.shields.io/pypi/v/django-daterangefilter.svg)](https://pypi.org/project/django-daterangefilter/)\n[![Python version](https://img.shields.io/pypi/pyversions/django-daterangefilter.svg)](https://pypi.org/project/django-daterangefilter/)\n[![GitHub issues](https://img.shields.io/github/issues/andreynovikov/django-daterangefilter.svg)](https://github.com/andreynovikov/django-daterangefilter/issues)\n[![Code quality](https://img.shields.io/codacy/grade/e90b9f21941a4d8c93edb4a58caa3667.svg)](https://www.codacy.com/app/novikov/django-daterangefilter)\n[![Coverage](https://img.shields.io/codacy/coverage/e90b9f21941a4d8c93edb4a58caa3667.svg)](https://www.codacy.com/app/novikov/django-daterangefilter)\n[![GitHub license](https://img.shields.io/github/license/andreynovikov/django-daterangefilter.svg)](LICENSE)\n\nApplication adds three Django admin list filters: ```DateRangeFilter```, ```PastDateRangeFilter``` and ```FutureDateRangeFilter```. These filters let user filter models by date range. ```PastDateRangeFilter``` and ```FutureDateRangeFilter``` add quick selection of predefined date ranges. Filters can be applied to any model date fields. Application supports default Django admin theme and [Suit theme](https://github.com/darklow/django-suit).\n\n![Admin screenshot](https://raw.githubusercontent.com/andreynovikov/django-daterangefilter/master/screenshot-admin.png)\n\n## Requirements\n\n* Python 2.7+ or Python 3.3+\n* Django 1.9+\n\n## Installation\n\nInstall ```django-daterangefilter``` using pip:\n\n```shell\npip install django-daterangefilter\n```\n\nAdd ```daterangefilter``` to ```INSTALLED_APPS```. 
Example:\n\n```python\nINSTALLED_APPS = (\n ...\n 'daterangefilter',\n ...\n)\n```\n\nApplication uses static files so do not forget to issue ```collectstatic``` management command in production environment.\n\n## Example usage\n\nin admin.py:\n\n```python\nfrom django.contrib import admin\nfrom daterangefilter.filters import PastDateRangeFilter, FutureDateRangeFilter\n\[email protected](Post)\nclass PostAdmin(admin.ModelAdmin):\n list_filter = [\n ('created_at', PastDateRangeFilter), ('publish_at', FutureDateRangeFilter)\n ]\n```\n\n## Advanced usage\n\nPredefined ranges can be completely redefined by overriding ```_past_ranges.html``` and ```_future_ranges.html``` templates.\nTake into account that these templates are inserted in the middle of the javascript code and may contain nothing but ranges\ndefinition. For more examples on using ```moment``` library refer to [library documentation](https://momentjs.com/docs/#/manipulating/).\n\n## Credits\n\nFilter widget uses a great JavaScript date range picker component - [Date Range Picker](https://github.com/dangrossman/daterangepicker) by Dan Grossman.\n" }, { "alpha_fraction": 0.6400741934776306, "alphanum_fraction": 0.6456400752067566, "avg_line_length": 32.6875, "blob_id": "bf0afed53d60c20a2f25f3f203049d70a70f4471", "content_id": "f92c985caa8378254beea18107c7528b2647badd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1078, "license_type": "permissive", "max_line_length": 83, "num_lines": 32, "path": "/setup.py", "repo_name": "cansarigol/django-daterangefilter", "src_encoding": "UTF-8", "text": "import setuptools\n\nwith open('README.md', 'r') as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name='django-daterangefilter',\n version='1.0.0',\n license='MIT',\n author='Andrey Novikov',\n author_email='[email protected]',\n description='Date range filter for Django admin',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/andreynovikov/django-daterangefilter/tree/master',\n project_urls={\n 'Source': 'https://github.com/andreynovikov/django-daterangefilter/',\n 'Tracker': 'https://github.com/andreynovikov/django-daterangefilter/issues'\n },\n packages=setuptools.find_packages(),\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Framework :: Django',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n ],\n)\n" }, { "alpha_fraction": 0.6098495721817017, "alphanum_fraction": 0.6119139194488525, "avg_line_length": 43.6184196472168, "blob_id": "9fda87128b6c3ca97416532c2497b745de8bf008", "content_id": "4b9e6f0c03303d0565c7e6c8735d43b7f6b42dae", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3391, "license_type": "permissive", "max_line_length": 161, "num_lines": 76, "path": "/daterangefilter/filters.py", "repo_name": "cansarigol/django-daterangefilter", "src_encoding": "UTF-8", "text": "import datetime\n\nfrom django.db import models\nfrom django.contrib import admin, messages\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\nfrom django.conf import settings\n\n\nclass DateRangeFilter(admin.FieldListFilter):\n def __init__(self, field, request, params, model, model_admin, field_path):\n 
self.field_name = field_path\n self.lookup_kwarg_gte = '{}__gte'.format(field_path)\n self.lookup_kwarg_lte = '{}__lte'.format(field_path)\n self.lookup_gte = params.get(self.lookup_kwarg_gte)\n self.lookup_lte = params.get(self.lookup_kwarg_lte)\n # todo: check if this is required in default admin\n if self.lookup_gte == '':\n params.pop(self.lookup_kwarg_gte)\n if self.lookup_lte == '':\n params.pop(self.lookup_kwarg_lte)\n if self.lookup_gte and self.lookup_lte:\n self.lookup_val = '{} - {}'.format(self.lookup_gte, self.lookup_lte)\n # if we are filtering DateTimeField we should add one day to final date\n if \"__\" in field_path:\n related_model, field = field_path.split(\"__\")\n field = model._meta.get_field(related_model).related_model._meta.get_field(field)\n else:\n field = model._meta.get_field(field_path)\n \n if isinstance(field, models.DateTimeField):\n try:\n gte_date = datetime.datetime.strptime(self.lookup_gte, '%Y-%m-%d')\n lte_date = datetime.datetime.strptime(self.lookup_lte, '%Y-%m-%d')\n lte_date = lte_date + datetime.timedelta(seconds=3600*24-1)\n if settings.USE_TZ:\n gte_date = timezone.make_aware(gte_date, timezone.get_current_timezone())\n lte_date = timezone.make_aware(lte_date, timezone.get_current_timezone())\n params[self.lookup_kwarg_gte] = gte_date.strftime('%Y-%m-%d %H:%M:%S%z')\n params[self.lookup_kwarg_lte] = lte_date.strftime('%Y-%m-%d %H:%M:%S%z')\n except ValueError:\n messages.add_message(request, messages.ERROR, _(\"Invalid date for '%(field_name)s' field range filter\") % {'field_name': field.verbose_name})\n else:\n self.lookup_val = ''\n super(DateRangeFilter, self).__init__(field, request, params, model, model_admin, field_path)\n\n def get_template(self):\n if 'suit' in settings.INSTALLED_APPS:\n return 'daterangefilter/suit_daterangefilter.html'\n else:\n return 'daterangefilter/daterangefilter.html'\n\n def choices(self, changelist):\n yield {\n 'field_name': self.field_path,\n 'ranges_template': self.ranges_template,\n 'value': self.lookup_val,\n 'query_string': changelist.get_query_string(remove=self._get_expected_fields())\n }\n\n def expected_parameters(self):\n return self._get_expected_fields()\n\n def _get_expected_fields(self):\n return [self.lookup_kwarg_gte, self.lookup_kwarg_lte]\n\n template = property(get_template)\n ranges_template = None\n\n\nclass FutureDateRangeFilter(DateRangeFilter):\n ranges_template = 'daterangefilter/_future_ranges.html'\n\n\nclass PastDateRangeFilter(DateRangeFilter):\n ranges_template = 'daterangefilter/_past_ranges.html'\n" }, { "alpha_fraction": 0.654275119304657, "alphanum_fraction": 0.654275119304657, "avg_line_length": 47.90909194946289, "blob_id": "af54435b210a6d654944fe4e4ca1f9be2a63a864", "content_id": "44de1457f1b4cc2e6fd6e30c9346627b641e5475", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 538, "license_type": "permissive", "max_line_length": 97, "num_lines": 11, "path": "/daterangefilter/templates/daterangefilter/_load_momentjs.html", "repo_name": "cansarigol/django-daterangefilter", "src_encoding": "UTF-8", "text": "{% load static %}\n{% if LANGUAGE_CODE != 'en-us' %}\n {% if LANGUAGE_CODE in 'ru,' %}{# we use prefetched locale files for available translations #}\n<script src=\"{% static 'daterangefilter/moment/moment.min.js' %}\"></script>\n<script src=\"{% static 'daterangefilter/moment/locale/'|add:LANGUAGE_CODE|add:'.js' %}\"></script>\n {% else %}\n<script src=\"{% static 'daterangefilter/moment/moment-with-locales.min.js' 
%}\"></script>\n {% endif %}\n{% else %}\n<script src=\"{% static 'daterangefilter/moment/moment.min.js' %}\"></script>\n{% endif %}\n" }, { "alpha_fraction": 0.7669903039932251, "alphanum_fraction": 0.7669903039932251, "avg_line_length": 28.428571701049805, "blob_id": "c93c25970dc4286b51008c9b3b335bd887ebf24d", "content_id": "e374891407a1a1572ec607597e3ce173fa0594ef", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 206, "license_type": "permissive", "max_line_length": 55, "num_lines": 7, "path": "/daterangefilter/apps.py", "repo_name": "cansarigol/django-daterangefilter", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass DateRangeFilterAppConfig(AppConfig):\n name = 'daterangefilter'\n verbose_name = _('Date Range Filter')\n" } ]
6
mholwill/functions_lab_with_malcolm
https://github.com/mholwill/functions_lab_with_malcolm
f085037344fe4d711a1caff5c813eebb73028ba3
614c12219396e160a32a16bbc984187eb0dcf2e1
1eb2c9df52fa433941453440f86db3536472b0b0
refs/heads/main
2023-03-09T17:56:55.579884
2021-02-24T16:13:31
2021-02-24T16:13:31
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.554347813129425, "alphanum_fraction": 0.6413043737411499, "avg_line_length": 17.600000381469727, "blob_id": "a5eba1fddb8961b36a1fe60d9b31725066830afe", "content_id": "55775ef1d3b0e25c6cb23e465008fabcca0f0473", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 92, "license_type": "no_license", "max_line_length": 31, "num_lines": 5, "path": "/src/python_functions_practice.py", "repo_name": "mholwill/functions_lab_with_malcolm", "src_encoding": "UTF-8", "text": "def return_10():\n return 10\n\ndef add(number_1, number_2):\n return number_1 + number_2" } ]
1
DanTulovsky/game
https://github.com/DanTulovsky/game
74a816d543c3aa9082c3fe37b433a5492d724574
6784d65bb702c0edd049afc491f88f9e68aeba70
b4239281a768ed1d0a9b572c51c1d1ad1e6582a1
refs/heads/master
2019-06-16T17:41:59.040463
2013-03-13T02:11:50
2013-03-13T02:11:50
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5727847814559937, "alphanum_fraction": 0.577531635761261, "avg_line_length": 16.55555534362793, "blob_id": "49a5d8fafa4cf783b894aa8d6e73987b4f585659", "content_id": "2f53021f1cd8695737b40f6154737ef91f3bb710", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 632, "license_type": "no_license", "max_line_length": 47, "num_lines": 36, "path": "/src/teams.py", "repo_name": "DanTulovsky/game", "src_encoding": "UTF-8", "text": "# Teams.\n\nfrom colors import *\n\n\n\n\nclass Team(object):\n\n def __init__(self, color, strength=0):\n \"\"\"Init.\n\n Args:\n color: (colors.Color) Team color\n \"\"\"\n self.color = color\n self.members = set()\n # as integer; 0 is lowest\n self.strength = strength\n\n def Join(self, square):\n \"\"\"Join this team.\"\"\"\n self.members.add(square)\n\n def Leave(self, square):\n \"\"\"Leave the team.\"\"\"\n try:\n self.members.remove(square)\n except KeyError:\n # already removed\n pass\n\n\nTEAMS = {\"red\": Team(Colors.red),\n \"blue\": Team(Colors.blue, strength=1),\n \"green\": Team(Colors.green)}\n" }, { "alpha_fraction": 0.6191161274909973, "alphanum_fraction": 0.6320657730102539, "avg_line_length": 24.87765884399414, "blob_id": "4b646a4d6e5fcc6b6a5bd3230823cc69abc673c5", "content_id": "39abee76541bdaed0d689fd01256f41ec73abc1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4865, "license_type": "no_license", "max_line_length": 73, "num_lines": 188, "path": "/src/square.py", "repo_name": "DanTulovsky/game", "src_encoding": "UTF-8", "text": "# This is a citizen of the world.\n\nimport threading\n\nfrom colors import *\nimport random\nimport teams\n\n\nclass Error(Exception):\n \"\"\"General error.\"\"\"\n\n\nclass TeamError(Error):\n \"\"\"Raised on invalids team.\"\"\"\n\n\nclass Square(object):\n \"\"\"One square.\"\"\"\n\n @property\n def above(self):\n \"\"\"Returns coordinates of square above this one.\"\"\"\n return (self.position[0], self.position[1] - 1)\n\n @property\n def below(self):\n \"\"\"Returns coordinates of square below this one.\"\"\"\n return (self.position[0], self.position[1] + 1)\n\n @property\n def right(self):\n \"\"\"Returns coordinates of square to the right of this one.\"\"\"\n return (self.position[0] + 1, self.position[1])\n\n @property\n def left(self):\n \"\"\"Returns coordinates of square to the left of this one.\"\"\"\n return (self.position[0] - 1, self.position[1])\n\n @property\n def upperleft(self):\n \"\"\"Returns coordinates of square to the upper left of this one.\"\"\"\n return (self.position[0] - 1, self.position[1] - 1)\n\n @property\n def upperright(self):\n \"\"\"Returns coordinates of square to the upper right of this one.\"\"\"\n return (self.position[0] + 1, self.position[1] - 1)\n\n @property\n def lowerleft(self):\n \"\"\"Returns coordinates of square to the lower left of this one.\"\"\"\n return (self.position[0] - 1, self.position[1] + 1)\n\n @property\n def lowerright(self):\n \"\"\"Returns coordinates of square to the lower right of this one.\"\"\"\n return (self.position[0] + 1, self.position[1] + 1)\n\n @property\n def color(self):\n with self._data_lock:\n return self._color\n\n @color.setter\n def color(self, value):\n with self._data_lock:\n self._color = value\n\n def __init__(self, color=Colors.white, position=(0,0), team=None):\n \"\"\"Initial state of a square.\n\n The color is either Colors.black or green. 
The position\n is (top, left) tuple, but counting squares, not pixels.\n\n So position (1, 1) is actually (10, 10) in pixels. (1 * square_width)\n\n Args:\n color: (string) initial color of the square; b/w\n positions: ((int, int) position in the world (not px)\n team: (teams.Team) the team this square belongs to\n \"\"\"\n self._color = color\n self.tint = Colors.custom(0, 0, 0)\n self.position = position\n self.age = 0 # number of generations this has been alive.\n self._data_lock = threading.Lock()\n self.team = team\n self.tint_increase = 2 # how quickly the colors change\n\n # next generation\n self.color_new = None\n self.tint_new = None\n self.age_new = None\n self.killed = False\n\n # join team\n if self.team is None:\n raise TeamError(\"Team must be specified.\")\n self.team.Join(self)\n\n # tint_int value based on the team\n self._tint_int = self._GetTeamTint()\n\n def FlipColor(self):\n \"\"\"Flips the color of a square.\"\"\"\n if self.color in Colors.alive():\n self.color_new = Colors.white\n else:\n self.color_new = Colors.black\n\n def HappyBirthday(self):\n \"\"\"Adds one to the age of the Square.\"\"\"\n self.tint_new = self.tint\n self.age_new = self.age\n self.color_new = self.color\n self.age_new += 1\n\n # turn a deeper shade based on the team\n if self.tint_new[self._tint_int] + self.tint_increase < 256:\n self.tint_new[self._tint_int] += self.tint_increase\n else:\n tint = 255\n\n def Kill(self):\n \"\"\"Turns square white.\"\"\"\n self.tint_new = self.tint\n self.color_new = Colors.white\n self.tint_new = Colors.custom(0, 0, 0)\n self.age_new = 0\n self.killed = True\n\n if self.team:\n self.team.Leave(self)\n\n def ComeAlive(self, influence_team=None):\n \"\"\"Makes white square come alive -> black.\n\n Args:\n influence_team: (Team) team with the most influence on this square\n \"\"\"\n self.tint_new = self.tint\n self.age_new = 0\n self.color_new = Colors.black\n self.tint_new = Colors.custom(0, 0, 0)\n\n # join team based on neighbors\n if influence_team is not None:\n self.team = influence_team\n else:\n self.team = random.choice(teams.TEAMS.values())\n print \"Joining random team...\"\n self.team.Join(self)\n self._tint_int = self._GetTeamTint()\n\n def UpdateNextGenValues(self):\n \"\"\"Cycles the square to the next generation.\"\"\"\n if self.color_new:\n self.color = self.color_new\n self.color_new = None\n\n if self.age_new:\n self.age = self.age_new\n self.age_new = None\n\n if self.tint_new:\n self.tint = self.tint_new\n self.tint_new = None\n\n if self.killed:\n self.team = None\n self.killed = False\n\n def _GetTeamTint(self):\n \"\"\"Returns the correct tint index to change based on team color.\n\n Returns:\n tint: (int) 0 = r; 2 = g; 1 = b\n \"\"\"\n if self.team.color == Colors.red:\n return 0\n\n if self.team.color == Colors.blue:\n return 2\n\n if self.team.color == Colors.green:\n return 1\n" }, { "alpha_fraction": 0.6332256197929382, "alphanum_fraction": 0.6445332169532776, "avg_line_length": 29.548051834106445, "blob_id": "89149cd77fc2ea8a559e8e6e9d04b4867ed85664", "content_id": "3bdfd8b1772bd3aeff25d21b0a7cfc178bb89ace", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11762, "license_type": "no_license", "max_line_length": 75, "num_lines": 385, "path": "/src/world_rules.py", "repo_name": "DanTulovsky/game", "src_encoding": "UTF-8", "text": "\"\"\"\nCreated on Mar 7, 2013\n\n@author: dtulovsky\n\"\"\"\n\nfrom colors import Colors\nfrom copy import *\nimport gflags\nimport logging\nimport 
random\n\n\nFLAGS = gflags.FLAGS\n\n# References to neighbors\nRIGHT = \"right\"\nLEFT = \"left\"\nABOVE = \"above\"\nBELOW = \"below\"\nUPPERLEFT = \"upperleft\"\nUPPERRIGHT = \"upperright\"\nLOWERLEFT = \"lowerleft\"\nLOWERRIGHT = \"lowerright\"\n\nPOSITIONS = [RIGHT, LEFT, ABOVE, BELOW, UPPERLEFT, UPPERRIGHT, LOWERLEFT,\n LOWERRIGHT]\n\n\nclass Error(Exception):\n \"\"\"General Exception.\"\"\"\n\n\nclass WorldRules(object):\n \"\"\"Defines the rules of the world. Inherit this to extend.\"\"\"\n\n def NextGen(self, square, random_events, neighbors):\n \"\"\"Returns a copy of the square for next generation.\n\n Args:\n square: (Square) square object\n squares: (dict) Squares in the world.\n next_gen: (dict) all square objects for next generation\n index: (int) index of current square in squoares\n random_events: (bool) If True, allow random events (if any)\n\n Returns:\n square: (Square) a copy of the square object with new paramters\n \"\"\"\n raise NotImplemented(\"Don't use this class directly.\")\n\n def RandomEvents(self, squares):\n \"\"\"Introduces random events.\"\"\"\n raise NotImplemented(\"Override this function if you want to use it.\")\n\n\nclass Wolfam222(WorldRules):\n \"\"\"Wolfram222.\"\"\"\n\n color_hash = {(Colors.black, Colors.black, Colors.black): Colors.black,\n (Colors.black, Colors.black, Colors.white): Colors.black,\n (Colors.black, Colors.white, Colors.black): Colors.white,\n (Colors.black, Colors.white, Colors.white): Colors.black,\n (Colors.white, Colors.black, Colors.black): Colors.black,\n (Colors.white, Colors.black, Colors.white): Colors.black,\n (Colors.white, Colors.white, Colors.black): Colors.black,\n (Colors.white, Colors.white, Colors.white): Colors.white}\n\n def NextGen(self, square, random_events, neighbors):\n \"\"\"Given a square, sets its state for the next generation.\n\n * left neighbor self right neighbor => new self\n * b b b => b\n * b b w => b\n * b w b => w\n * b w w => b\n * w b b => b\n * w b w => b\n * w w b => b\n * w w w => w\n \"\"\"\n # copy the square so we don't mix generations\n new_square = copy(square)\n\n try:\n lnc = self._NeighborColor(square, LEFT, squares)\n except (OutsideWorld, NoNeighbor) as e:\n lnc = Colors.white\n\n try:\n rnc = self._NeighborColor(square, RIGHT, squares)\n except (OutsideWorld, NoNeighbor) as e:\n rnc = Colors.white\n\n try:\n new_square.color = Wolfam222.color_hash[(lnc, square.color, rnc)]\n except KeyError, e:\n logging.error(\"Wolfram222 Combination missing (this only works with \"\n \"black and white.\")\n\n next_gen[index] = new_square\n\nclass FlipWorld(WorldRules):\n \"\"\"Flip existing squares only.\"\"\"\n\n def NextGen(self, square, random_events, neighbors):\n \"\"\"Given a square, sets its state for the next generation.\n\n * Flip the color of each existing square.\n \"\"\"\n new_square = copy(square)\n new_square.FlipColor()\n next_gen[index] = new_square\n\n\nclass FlipWorldNeighbor(WorldRules):\n \"\"\"Flip existing squares only.\"\"\"\n\n def NextGen(self, square, random_events, neighbors):\n \"\"\"Given a square, sets its state for the next generation.\n\n * Flips if the neighbors on left and right sides are the same.\n * If there are no neighbors, don't change.\n \"\"\"\n new_square = copy(square)\n try:\n lnc = self._NeighborColor(square, LEFT, squares)\n except (OutsideWorld, NoNeighbor) as e:\n # Either no neighbor or neighbor would be out of this world\n return new_square\n\n if lnc == rnc:\n new_square.FlipColor()\n\n next_gen[index] = new_square\n\n\nclass 
LifeWorld(WorldRules):\n \"\"\"Life world. Game of life, original rules.\"\"\"\n\n def NextGen(self, square, random_events, neighbors):\n \"\"\"Given a square, sets its state for the next generation.\n\n Supports only black/white squares.\n\n * If black square has < 2 black neighbors, turns white.\n * If black square has 2 or 3 black neighbors, remains black.\n * If black square has > 3 neighbors, turns white.\n * If white square has 3 neighbors, turns black.\n \"\"\"\n if square.color == Colors.black:\n if neighbors[\"total_neighbors\"] < 2:\n square.color_new = Colors.white\n\n if neighbors[\"total_neighbors\"] in [2, 3]:\n square.color_new = Colors.black\n\n if neighbors[\"total_neighbors\"] > 3:\n square.color_new = Colors.white\n\n if square.color == Colors.white:\n if neighbors[\"total_neighbors\"] == 3:\n square.color_new = Colors.black\n elif neighbors[\"total_neighbors\"] == 0:\n square.color_new = Colors.white\n\nclass LifeWorldColorRandom(WorldRules):\n \"\"\"Life world. Custom.\"\"\"\n\n def RandomEvents(self, square):\n \"\"\"Random event for one square.\"\"\"\n MAX_RANDOM = 10000\n\n # % chance a square will flip colors\n if random.randrange(0, MAX_RANDOM, 1) == 1:\n logging.info(\"Random event: flipping square %s.\", square.position)\n square.FlipColor()\n return square\n\n def NextGen(self, square, random_events, neighbors):\n \"\"\"Given a square, sets its state for the next generation.\n\n * If colored square has < 2 black neighbors, turns white.\n * If colored square has 2 or 3 black neighbors, remains colored (same).\n * If colored square has > 3 neighbors, turns white.\n * If white square has 3 neighbors, turns colored (random).\n \"\"\"\n\n if square.color != Colors.white: # is alive\n if neighbors[\"total_neighbors\"] < 2:\n square.color_new = Colors.white\n\n if neighbors[\"total_neighbors\"] in [2, 3]:\n square.color_new = square.color\n\n if neighbors[\"total_neighbors\"] > 3:\n square.color_new = Colors.white\n\n if square.color == Colors.white:\n if neighbors[\"total_neighbors\"] == 3:\n square.color_new = random.choice(Colors.alive())\n elif neighbors[\"total_neighbors\"] == 0:\n square.color_new = Colors.white\n\n if random_events:\n square = self.RandomEvents(square)\n\n\nclass LifeCustomWorld(WorldRules):\n \"\"\"Life world. Custom. 
Includes other attributes.\"\"\"\n\n def __init__(self):\n super(LifeCustomWorld, self).__init__()\n # kill if less then this number of neighbors\n self.kill_lonely_limit = 2\n # kill if more than this number of neighbors\n self.kill_crowded_limit = 3\n # come alive if this number of neighbors\n self.come_alive_neighbors = 3\n\n def RandomEvents(self, square):\n \"\"\"Random event for one square.\"\"\"\n MAX_RANDOM = 10000\n\n # % chance a square will flip colors\n if random.randrange(0, MAX_RANDOM, 1) == 1:\n logging.info(\"Random event: flipping square %s.\", square.position)\n square.FlipColor()\n\n def NextGen(self, square, random_events, neighbors):\n \"\"\"Given a square, sets its state for the next generation.\n\n * If colored square has < 2 black neighbors, turns white.\n * If colored square has 2 or 3 black neighbors, remains colored (same).\n * If colored square has > 3 neighbors, turns white.\n * If white square has 3 neighbors, turns colored (random).\n \"\"\"\n killed = False\n kill_lonely_limit = self.kill_lonely_limit\n kill_crowded_limit = self.kill_crowded_limit\n come_alive_neighbors = self.come_alive_neighbors\n\n # with age, comes strength\n if square.age > 5:\n kill_lonely_limit = kill_lonely_limit - 1\n\n if square.age > 25:\n kill_crowded_limit = kill_crowded_limit + 1\n\n# if square.age > 60:\n# kill_lonely_limit = kill_lonely_limit - 1\n#\n# if square.age > 105:\n# kill_crowded_limit = kill_crowded_limit + 1\n\n # with deep age, comes weakness\n# if square.age > 50:\n# kill_lonely_limit = kill_lonely_limit\n# kill_crowded_limit = kill_crowded_limit\n#\n# if square.age > 80:\n# kill_lonely_limit = kill_lonely_limit + 1\n# kill_crowded_limit = kill_crowded_limit - 1\n#\n# if square.age > 120:\n# kill_lonely_limit = kill_lonely_limit + 1\n# kill_crowded_limit = kill_crowded_limit - 1\n#\n# if square.age > 150:\n# kill_lonely_limit = kill_lonely_limit + 1\n# kill_crowded_limit = kill_crowded_limit - 1\n\n\n if square.color != Colors.white: # is alive\n if neighbors[\"total_neighbors\"] < kill_lonely_limit:\n square.Kill()\n killed = True\n\n if neighbors[\"total_neighbors\"] > kill_crowded_limit:\n square.Kill()\n killed = True\n\n if not killed:\n # survived this generation!\n square.HappyBirthday()\n\n if square.color == Colors.white:\n if neighbors[\"total_neighbors\"] == come_alive_neighbors:\n square.ComeAlive()\n elif neighbors[\"total_neighbors\"] == 0:\n # Removes uninteresting squares from the list\n square.color_new = Colors.white\n\n if random_events:\n self.RandomEvents(square)\n\nclass LifeCustomWorldFight(WorldRules):\n \"\"\"Life world. Custom. Includes other attributes. 
Team fights.\"\"\"\n\n def __init__(self):\n super(LifeCustomWorldFight, self).__init__()\n # kill if less then this number of neighbors\n self.kill_lonely_limit = 2\n # how many friends around square kill it\n self.kill_friend_crowded_limit = 6\n # how many enemies around square kill it\n self.kill_enemy_crowded = 2\n # come alive if this number of neighbors\n self.come_alive_neighbors = 2\n\n def RandomEvents(self, square):\n \"\"\"Random event for one square.\"\"\"\n MAX_RANDOM = 10000\n\n # % chance a square will flip colors\n if random.randrange(0, MAX_RANDOM, 1) == 1:\n logging.info(\"Random event: flipping square %s.\", square.position)\n square.FlipColor()\n\n def NextGen(self, square, random_events, neighbors):\n \"\"\"Given a square, sets its state for the next generation.\"\"\"\n killed = False\n kill_lonely_limit = self.kill_lonely_limit\n kill_friend_crowded_limit = self.kill_friend_crowded_limit\n kill_enemy_crowded = self.kill_enemy_crowded\n come_alive_neighbors = self.come_alive_neighbors\n\n # with age, comes strength\n #if square.age > 5:\n # kill_lonely_limit = kill_lonely_limit - 1\n\n #if square.age > 25:\n # kill_crowded_limit = kill_crowded_limit + 1\n\n# if square.age > 60:\n# kill_lonely_limit = kill_lonely_limit - 1\n#\n# if square.age > 105:\n# kill_crowded_limit = kill_crowded_limit + 1\n\n # with deep age, comes weakness\n# if square.age > 50:\n# kill_lonely_limit = kill_lonely_limit\n# kill_crowded_limit = kill_crowded_limit\n#\n# if square.age > 80:\n# kill_lonely_limit = kill_lonely_limit + 1\n# kill_crowded_limit = kill_crowded_limit - 1\n#\n# if square.age > 120:\n# kill_lonely_limit = kill_lonely_limit + 1\n# kill_crowded_limit = kill_crowded_limit - 1\n#\n# if square.age > 150:\n# kill_lonely_limit = kill_lonely_limit + 1\n# kill_crowded_limit = kill_crowded_limit - 1\n\n\n if square.color != Colors.white: # is alive\n if neighbors[\"total_neighbors\"] < kill_lonely_limit:\n square.Kill()\n killed = True\n\n if neighbors[\"friends\"] > kill_friend_crowded_limit:\n square.Kill()\n killed = True\n\n if neighbors[\"enemies\"] > kill_enemy_crowded:\n square.Kill()\n killed = True\n\n if not killed:\n # survived this generation!\n square.HappyBirthday()\n else: # is dead\n if neighbors[\"total_neighbors\"] == come_alive_neighbors:\n if neighbors[\"influence\"] is None:\n pass\n square.ComeAlive(neighbors[\"influence\"])\n elif neighbors[\"total_neighbors\"] == 0:\n # Removes uninteresting squares from the list\n square.color_new = Colors.white\n\n if random_events:\n self.RandomEvents(square)\n\n" }, { "alpha_fraction": 0.5811789035797119, "alphanum_fraction": 0.6866597533226013, "avg_line_length": 27.47058868408203, "blob_id": "9d51e86c536d707884fd64e67f7b9a26534207c5", "content_id": "c796137c1bbf4d6d49ee1cce0192027112de1430", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 967, "license_type": "no_license", "max_line_length": 109, "num_lines": 34, "path": "/src/pygame_test.py", "repo_name": "DanTulovsky/game", "src_encoding": "UTF-8", "text": "#! 
/usr/local/bin/python\n\nimport pygame\nimport sys\nimport time\n\nfrom pygame.locals import *\n\npygame.init()\n\nfpsClock = pygame.time.Clock()\nwindowSurfaceObj = pygame.display.set_mode((640, 480))\n\npygame.display.set_caption(\"Game...\")\n\nredColor = pygame.Color(255, 0, 0)\ngreenColor = pygame.Color(0, 255, 0)\nblueColor = pygame.Color(0, 0, 255)\nwhiteColor = pygame.Color(255, 255, 255)\n\nwhile True:\n windowSurfaceObj.fill(whiteColor)\n \n #pygame.draw.polygon(windowSurfaceObj, greenColor, ((146, 0), (291, 106), (236, 277), (56, 277), (0, 106)))\n #pygame.draw.circle(windowSurfaceObj, blueColor, (300, 50), 20, 0)\n #pygame.draw.ellipse(windowSurfaceObj, redColor, (300, 250, 40, 80), 1)\n pygame.draw.rect(windowSurfaceObj, \n redColor, (10, 10, 100, 100))\n \n #pygame.draw.line(windowSurfaceObj, blueColor, (60, 160), (120, 60), 4)\n \n #fpsClock.tick(30) # pause to run the loop at 30 frames per second\n pygame.display.update()\n time.sleep(300)" }, { "alpha_fraction": 0.6305577754974365, "alphanum_fraction": 0.6426839232444763, "avg_line_length": 17.477611541748047, "blob_id": "25b1f3186825d092d02da4db6b87180fecf8eae7", "content_id": "13054f0082370a64bf82c806e625415f46099efd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1237, "license_type": "no_license", "max_line_length": 80, "num_lines": 67, "path": "/src/colors.py", "repo_name": "DanTulovsky/game", "src_encoding": "UTF-8", "text": "\"\"\"\nCreated on Mar 7, 2013\n\n@author: dtulovsky\n\"\"\"\n\nimport gflags\nimport pygame\n\nFLAGS = gflags.FLAGS\n\ngflags.DEFINE_bool(\"black_white\", False, \"Only black/white mode.\")\n\nWHITE = pygame.Color(\"white\")\nBLACK = pygame.Color(\"black\")\nGREEN = pygame.Color(\"green\")\nRED = pygame.Color(\"red\")\nBLUE = pygame.Color(\"blue\")\nPINK = pygame.Color(255, 0 , 127)\n\n\nclass Colors(object):\n \"\"\"Colors Helper.\"\"\" \n \n @classmethod\n def all(cls):\n if FLAGS.black_white:\n return [Colors.black, Colors.white]\n else:\n return [Colors.white, Colors.black, Colors.green, Colors.red, Colors.blue,\n Colors.pink]\n \n @classmethod\n def alive(cls):\n \"\"\"Returns only alive colors. 
No white.\"\"\"\n if FLAGS.black_white:\n return [Colors.black]\n else:\n return [Colors.black, Colors.green, Colors.red, Colors.blue, Colors.pink]\n \n @classmethod\n def white(cls):\n return WHITE\n\n @classmethod\n def black(cls):\n return BLACK\n\n @classmethod\n def green(cls):\n return GREEN \n\n @classmethod\n def red(cls):\n return RED\n\n @classmethod\n def blue(cls):\n return BLUE\n\n @classmethod\n def pink(cls):\n return PINK\n \n @classmethod\n def custom(cls, r, g, b, a=255):\n return pygame.Color(r, g, b, a)" }, { "alpha_fraction": 0.5833257436752319, "alphanum_fraction": 0.6002179384231567, "avg_line_length": 32.67278289794922, "blob_id": "00f0df2fc999dbdb5e4f97912b29fed50ec72561", "content_id": "71e25677b0487b0246fc96f640be0f19db826852", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11011, "license_type": "no_license", "max_line_length": 80, "num_lines": 327, "path": "/src/game.py", "repo_name": "DanTulovsky/game", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python\n# -*- coding: utf-8 -*-\n\nfrom copy import *\nimport gflags\nimport logging\nimport random\nimport sys\nimport time\n\nfrom colors import *\nimport random\nimport teams\nimport world\nfrom pygame.locals import *\nfrom square import *\nimport world_rules\n\n\nFLAGS = gflags.FLAGS\ngflags.DEFINE_integer(\"max_generations\", 2000000, \"Generations to run.\")\ngflags.DEFINE_integer(\"world_height\", 1200, \"World height in px.\")\ngflags.DEFINE_integer(\"world_width\", 1200, \"World height in px.\")\ngflags.DEFINE_integer(\"square_width\", 20, \"Citizen square width.\")\ngflags.DEFINE_integer(\"fps\", 5, \"Frame per second (wait time between gens.\")\ngflags.DEFINE_bool(\"random_events\", False, \"Allow random events.\")\ngflags.DEFINE_string(\"rules\", None, \"World rules to use.\")\ngflags.DEFINE_bool(\"random_start\", False, \"Random initial allocation.\")\ngflags.DEFINE_bool(\"manual\", False, \"Allows manual layout of initial \"\n \"position, disables random_start.\")\ngflags.DEFINE_integer(\"workers\", 100, \"Worker threads.\")\n\n\nclass Error(Exception):\n \"\"\"General Exception.\"\"\"\n\n\nclass Game(object):\n \"\"\"Models generations of squares.\n\n Squares change color based on their own color and the color\n of their neighbors. 
Each generation is simply a row of\n squares.\n\n Empty space on either side is considered to be a Square.Colors.black square.\n \"\"\"\n\n def __init__(self, world_rules, max_generations=None,\n world_width=None, world_height=None,\n square_width=None, random_events=None):\n \"\"\"Init.\n\n Args:\n world_rules: (WorldRules) Or derivative.\n max_generations: (int) how many iterations to run throuhg\n world_width: (int) pixel width of the world\n world_height: (int) pixel height of the world.\n \"\"\"\n # Keeps track of squares in the world\n self.squares = {}\n\n # Teams\n self.teams = teams.TEAMS\n\n self.max_generations = max_generations or FLAGS.max_generations\n self.world_width = world_width or FLAGS.world_width\n self.world_height = world_height or FLAGS.world_height\n self.square_width = square_width or FLAGS.square_width\n random_events = random_events or FLAGS.random_events\n # max squares in one line/row\n self.max_squares = self.world_width / self.square_width\n\n # drawing surface\n self.world = world.World(self.world_width, self.world_height,\n self.square_width, world_rules, self.squares,\n black_white=FLAGS.black_white,\n random_events = random_events,\n workers=FLAGS.workers)\n\n def AddSquare(self, position, color=Colors.black, overwrite=False,\n team=None):\n \"\"\"Adds a new square.\n\n Args:\n position: (tuple) (top, left)\n overwrite: (bool) If True, overwrites existing square\n team: (teams.Team) team to join.\n \"\"\"\n if position[0] < 0 or position[0] >= self.max_squares:\n return\n\n if position[1] < 0 or position[1] >= self.max_squares:\n return\n\n if team is None:\n team = random.choice(teams.TEAMS.values())\n\n if not position in self.squares or overwrite:\n self.squares[position] = Square(position=position, color=color, team=team)\n\n def AddNeighborsToList(self, square):\n \"\"\"Adds a square's neighbors to the self.squares list.\n\n Args:\n square: (Square) square object\n \"\"\"\n for pos in [square.above, square.below, square.right, square.left,\n square.upperleft, square.upperright, square.lowerleft,\n square.lowerright]:\n self.AddSquare(pos, color=Colors.white)\n\n def DisplayGeneration(self):\n \"\"\"Draws current state of the world.\"\"\"\n for top, left in sorted(self.squares.iterkeys()):\n square = self.squares[(top, left)]\n\n if square.color == square.color_new == Colors.white:\n # two generations white, remove from list\n del self.squares[square.position]\n\n if square.color == Colors.white and square.color_new == Colors.black:\n self.AddNeighborsToList(square)\n\n if square.color == Colors.black and square.color_new == Colors.black:\n self.AddNeighborsToList(square)\n\n if square.color == Colors.black and square.color_new == Colors.white:\n self.AddNeighborsToList(square)\n\n # update with next generation values\n square.UpdateNextGenValues()\n\n self.world.DrawSquare(top*self.world.square_width,\n left*self.world.square_width,\n square.color, square.tint)\n self.world.Update()\n\n def NextGeneration(self):\n \"\"\"Generates next generation.\"\"\"\n self.world.NextGeneration()\n\n def CreateWorld(self):\n \"\"\"Creates drawing surface.\"\"\"\n self.world.Initialize()\n\n def HandleNonGridClick(self, positions):\n \"\"\"Handles non-grid click.\n\n Args:\n position: (tuple) mousex, mousey pixel.\n \"\"\"\n pass\n\n def ToggleSquare(self, position):\n \"\"\"Turns a square on (black) or off (white) based on mouse click.\n\n Args:\n position: (tuple) mousex, mousey pixel.\n \"\"\"\n mousex, mousey = position\n # round down both to nearest 10. 
Divide by 10, take int part, * 10.\n x = round(mousex / self.world.square_width)\n y = round(mousey / self.world.square_width)\n\n if mousex > self.world_width or mousey > self.world_height:\n self.HandleNonGridClick(position)\n return\n\n if (x, y) not in self.squares:\n # create new black square\n self.AddSquare((x,y), Colors.black, overwrite=True)\n else:\n # square exists\n square = self.squares[(x, y)]\n if square.color == Colors.black:\n square.color_new = Colors.white\n square.tint = Colors.custom(0, 0, 0)\n square.tint_new = None\n else:\n square.color = Colors.black\n\n self.AddNeighborsToList(self.squares[x,y])\n\n def Run(self):\n \"\"\"Runs game.\n\n Args:\n generations: (int) run for this many generations.\n \"\"\"\n #self.DisplayGeneration()\n generations = 0\n msg = \"\"\n\n while generations < self.max_generations:\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == MOUSEBUTTONUP:\n logging.info(\"Mouse clicked on: %s\", (event.pos,))\n self.ToggleSquare(event.pos)\n elif event.type == KEYDOWN:\n logging.info(\"Key '%s' pressed.\", event.unicode)\n\n # Set message\n self.world.SetMessage(\"Gen = %s\" % generations)\n # displays current generation\n self.DisplayGeneration()\n # Changes squares to next generation values\n self.NextGeneration()\n # Pause\n self.world.fps_clock.tick(FLAGS.fps)\n generations += 1\n\n # print \"Total interesting squares: %s\" % len(self.squares)\n\n def ManualPosition(self):\n \"\"\"Allows user to set manual position. <enter> when finsihed.\"\"\"\n finished = False\n while not finished:\n for event in pygame.event.get():\n if event.type == MOUSEBUTTONUP:\n logging.info(\"Mouse clicked on: %s\", (event.pos,))\n self.ToggleSquare(event.pos)\n self.DisplayGeneration()\n elif event.type == KEYDOWN:\n if event.key in [K_KP_ENTER, K_RETURN]:\n finished = True\n\n\ndef Setup(argv):\n \"\"\"Pre-game setup.\"\"\"\n if argv is None:\n argv = sys.argv\n try:\n argv = FLAGS(argv) # parse flags\n except gflags.FlagsError, e:\n print '%s\\\\nUsage: %s ARGS\\\\n%s' % (e, sys.argv[0], FLAGS)\n sys.exit(1)\n\n myformat = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.DEBUG, format=myformat)\n\n\ndef SetInitialSquares(game):\n \"\"\"Sets initial game squares.\"\"\"\n if FLAGS.manual:\n game.ManualPosition()\n elif FLAGS.random_start:\n SetRandomSquares(game)\n else:\n initial = [((9, 13), Colors.black, game.teams[\"red\"]),\n ((9, 14), Colors.black, game.teams[\"red\"]),\n ((9, 15), Colors.black, game.teams[\"red\"]),\n ((8, 15), Colors.black, game.teams[\"red\"]),\n ((7, 15), Colors.black, game.teams[\"red\"]),\n ((9, 16), Colors.black, game.teams[\"red\"]),\n ((8, 16), Colors.black, game.teams[\"red\"]),\n ((7, 13), Colors.black, game.teams[\"red\"]),\n ((6, 13), Colors.black, game.teams[\"red\"]),\n ((5, 13), Colors.black, game.teams[\"red\"]),\n ((8, 10), Colors.black, game.teams[\"red\"]),\n ((9, 10), Colors.black, game.teams[\"red\"]),\n ((10, 15), Colors.black, game.teams[\"red\"]),\n ((10, 14), Colors.black, game.teams[\"red\"]),\n ((10, 13), Colors.black, game.teams[\"red\"]),\n ((11, 10), Colors.black, game.teams[\"red\"]),\n ((8, 14), Colors.black, game.teams[\"red\"]),\n ((12, 10), Colors.black, game.teams[\"red\"]),\n ((12, 9), Colors.black, game.teams[\"red\"]),\n ((13, 9), Colors.black, game.teams[\"red\"]),\n ((50, 13), Colors.black, game.teams[\"blue\"]),\n ((50, 14), Colors.black, game.teams[\"blue\"]),\n ((50, 15), Colors.black, 
game.teams[\"blue\"]),\n ((49, 15), Colors.black, game.teams[\"blue\"]),\n ((48, 15), Colors.black, game.teams[\"blue\"]),\n ((50, 16), Colors.black, game.teams[\"blue\"]),\n ((49, 16), Colors.black, game.teams[\"blue\"]),\n ((48, 13), Colors.black, game.teams[\"blue\"]),\n ((47, 13), Colors.black, game.teams[\"blue\"]),\n ((46, 13), Colors.black, game.teams[\"blue\"]),\n ((49, 10), Colors.black, game.teams[\"blue\"]),\n ((50, 10), Colors.black, game.teams[\"blue\"]),\n ((51, 15), Colors.black, game.teams[\"blue\"]),\n ((51, 14), Colors.black, game.teams[\"blue\"]),\n ((51, 13), Colors.black, game.teams[\"blue\"]),\n ((52, 10), Colors.black, game.teams[\"blue\"]),\n ((49, 14), Colors.black, game.teams[\"blue\"]),\n ((53, 10), Colors.black, game.teams[\"blue\"]),\n ((53, 9), Colors.black, game.teams[\"blue\"]),\n ((54, 9), Colors.black, game.teams[\"blue\"]),\n ]\n\n for index, color, team in initial:\n game.AddSquare(index, color, overwrite=True, team=team)\n game.AddNeighborsToList(game.squares[index])\n\n\ndef SetRandomSquares(game, black_white=True):\n \"\"\"Sets random squares in the game.\"\"\"\n max_squares = FLAGS.world_width / FLAGS.square_width\n\n for x in xrange(0, max_squares):\n for y in xrange(0, max_squares):\n color = random.choice(Colors.all())\n\n if color != Colors.white:\n game.AddSquare((x, y), color, overwrite=True)\n game.AddNeighborsToList(game.squares[(x,y)])\n\n\ndef main(argv=None):\n \"\"\"Main, run me.\"\"\"\n Setup(argv)\n\n rules = getattr(world_rules, str(FLAGS.rules))\n game = Game(rules)\n game.CreateWorld()\n\n SetInitialSquares(game)\n\n game.Run()\n raw_input(\"Hit <enter> to exit...\")\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6743420958518982, "alphanum_fraction": 0.6743420958518982, "avg_line_length": 18.0625, "blob_id": "3f296740f14af367283b9a70689770f737143959", "content_id": "bb8c1842f2e10f93294449638c9fbcd8e5736fd8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 304, "license_type": "no_license", "max_line_length": 70, "num_lines": 16, "path": "/src/profile/examine.py", "repo_name": "DanTulovsky/game", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python\n\nimport gflags\nimport pstats\nimport sys\n\nFLAGS = gflags.FLAGS\n\ngflags.DEFINE_string(\"file\", None, \"File that contains profile info.\",\n short_name=\"-f\")\n\nargv = FLAGS(sys.argv)\n\np = pstats.Stats(FLAGS.file)\n\np.strip_dirs().sort_stats(\"cumulative\").print_stats()" }, { "alpha_fraction": 0.6099432110786438, "alphanum_fraction": 0.6159090995788574, "avg_line_length": 27.047809600830078, "blob_id": "5c552d9cee53ad52205efce66f8797a8f15b1e59", "content_id": "0fa4a698c41cb4dee4ad3b29aea23b260da48731", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7040, "license_type": "no_license", "max_line_length": 86, "num_lines": 251, "path": "/src/world.py", "repo_name": "DanTulovsky/game", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"pygame helper.\"\"\"\n\nfrom colors import Colors\n\nimport gflags\nimport pygame\nimport Queue\nimport sys\nimport threading\n\nfrom pygame.locals import *\n\nfrom memoized import *\nimport world_rules\n\nFLAGS = gflags.FLAGS\n\nBACKGROUND = Colors.white()\n\n\nclass Error(Exception):\n \"\"\"General error.\"\"\"\n\n\nclass NoNeighbor(Error):\n \"\"\"Raised if square has no neighbor in the indicated direction.\"\"\"\n\n\nclass OutsideWorld(Error):\n \"\"\"Raised if 
neighbor would be outside the known world.\"\"\"\n\n\nclass World(object):\n \"\"\"The world (grid). (0,0) is top left. Should be square.\"\"\"\n\n def __init__(self, width, height, square_width, rules, squares,\n black_white=False, random_events=False, workers=10):\n \"\"\"Init.\n\n Args:\n width: (int) width of the world in pixels.\n height: (int) height of the world in pixels.\n square_width: (int) width/height of citizen squares\n rules: (world_rules.WorldRules) world rules, function pointer\n squares: (dict) dict of Square objects. Current state.\n \"\"\"\n pygame.init()\n self.width = width\n self.height = height\n self.square_width = square_width\n self.msg_width = 100\n self.black_white = black_white\n self.rules = rules()\n self.squares = squares\n self.random_events = random_events\n # worker queue\n #self.q = Queue.Queue()\n #self.q = multiprocessing.JoinableQueue()\n #self._StartWorkers(workers)\n\n def Initialize(self):\n self.fps_clock = pygame.time.Clock()\n self.surface = pygame.display.set_mode((self.width + self.msg_width, self.height))\n self.fontObj = pygame.font.Font('freesansbold.ttf', 14)\n\n # set background\n self.Clear()\n\n # queue worker for processing next_gen data\n def Worker(self):\n while True:\n func, args = self.q.get()\n func(*args)\n self.q.task_done()\n\n def NextGeneration(self):\n \"\"\"Moves world to next generation.\"\"\"\n# for square_index, square in self.squares.iteritems():\n# # Put each next_gen square in a worker thread\n# self.q.put((self.NextGenSquare, [square_index]))\n#\n# # wait for all threads to finish work\n# self.q.join()\n\n for square_index in self.squares.iterkeys():\n self.NextGenSquare(square_index)\n\n # reset memoized cache\n #Memoized.reset()\n\n def NextGenSquare(self, square_index):\n \"\"\"Sets the square's nextgen state.\n\n Args:\n square_index: (int) square index in world\n \"\"\"\n square = self.squares[square_index]\n neighbors = self._NeighborCount(square)\n self.rules.NextGen(square, self.random_events, neighbors)\n\n\n def SetMessage(self, msg=\"Game\"):\n self.msgSurfaceObj = self.fontObj.render(msg, False, Colors.blue())\n self.msgRectobj = self.msgSurfaceObj.get_rect()\n self.msgRectobj.topleft = (self.width + 4, 0)\n\n # Clear previous msg\n self.surface.fill(Colors.white(), self.msgRectobj)\n\n self.surface.blit(self.msgSurfaceObj, self.msgRectobj)\n\n def DrawSquare(self, top, left, color, tint=None):\n # Draw inside grid.\n square_color = color() + tint\n pygame.draw.rect(\n self.surface, square_color, (top+1, left+1, self.square_width - 1,\n self.square_width - 1))\n\n def DrawGrid(self, color=Colors.green()):\n \"\"\"Draws grid.\"\"\"\n self._DrawVerticalGrid(color=color)\n self._DrawHorizontalGrid(color=color)\n\n def Clear(self):\n \"\"\"Clears surface.\"\"\"\n self.surface.fill(BACKGROUND)\n self.DrawGrid()\n self.Update()\n\n def Update(self):\n \"\"\"Redraws surface.\"\"\"\n pygame.display.update()\n\n def InWorld(self, position):\n \"\"\"Returns True if given position is in the World.\n\n Args:\n position: (tuple) (top, left) pixel.\n \"\"\"\n top, left = position[0], position[1]\n if (top < 0 or\n left < 0 or\n top + self.square_width > self.height or\n left + self.square_width > self.width):\n return False\n else:\n return True\n\n #@Memoized\n def _NeighborColor(self, square, position):\n \"\"\"Returns the color of the neighbor in position.\n\n Args:\n square: (Square) Object\n position: (string) Valid position from Square class (RIGHT, LEFT, etc)\n\n Returns:\n color, team: (Square.COLOR, 
teams.Team) color and team of neighbor.\n\n Raises:\n NoNeighbor: If there is no neighbor\n OutsideWorld: If neighbor would be outside the known world.\n \"\"\"\n neighbor_position = getattr(square, position)\n\n if neighbor_position in self.squares:\n return (self.squares[neighbor_position].color,\n self.squares[neighbor_position].team)\n else:\n raise NoNeighbor(\"No neighbor at %s\" % (neighbor_position,))\n\n def _NeighborCount(self, square):\n \"\"\"Returns the count and color of a square's neighbors.\n\n Args:\n square: (Square) Object\n\n Returns:\n neighbors: (dict) {\n Color.white: N, # total\n \"friends\": H,\n \"enemies\": J,\n \"total_neighbors\": L, # non-white neighbors\n \"influence\": Team # the team with the most influence\n # simply with the most squares\n # surrounding this one\n }\n \"\"\"\n neighbors = {\"total_neighbors\": 0, \"friends\": 0, \"enemies\": 0}\n\n\n if square.position == (46, 14):\n pass\n # initialize all colors to 0\n for color in Colors.all():\n neighbors[color] = 0\n\n neighbor_team_count = {}\n\n for pos in (world_rules.POSITIONS):\n try:\n neighbor_color, neighbor_team = self._NeighborColor(square, pos)\n except (OutsideWorld, NoNeighbor):\n continue\n\n neighbors[neighbor_color] += 1\n if neighbor_color != Colors.white:\n neighbors[\"total_neighbors\"] += 1\n\n neighbor_team_count.setdefault(neighbor_team, 0)\n neighbor_team_count[neighbor_team] += 1\n\n if neighbor_team == square.team:\n neighbors[\"friends\"] += 1\n else:\n neighbors[\"enemies\"] += 1\n\n influence = None\n max = 0\n for team, count in neighbor_team_count.iteritems():\n if count > max:\n max = count\n influence = team\n\n\n neighbors[\"influence\"] = influence\n\n return neighbors\n\n def _DrawVerticalGrid(self, color):\n \"\"\"Draws vertical grid.\"\"\"\n for x in xrange(0, self.width + self.square_width, self.square_width):\n pygame.draw.line(self.surface, color, (x, 0),\n (x, self.height), 1)\n\n def _DrawHorizontalGrid(self, color):\n \"\"\"Draws horizontal grid.\"\"\"\n for y in xrange(0, self.height + self.square_width, self.square_width):\n pygame.draw.line(self.surface, color, (0, y),\n (self.width, y), 1)\n\n def _StartWorkers(self, num_worker_threads):\n \"\"\"Starts queue workers.\"\"\"\n for i in range(num_worker_threads):\n t = threading.Thread(target=self.Worker)\n #t = multiprocessing.Process(target=self.Worker)\n t.daemon = True\n t.start()\n" } ]
8
mek05/Task-1
https://github.com/mek05/Task-1
94c58e2ebc6272c11339985009c36a77885824da
3304951e708a3a991f25e60f8d16a1334836d08b
8e08027bf7d8a8b167f9d8eafbc81b1132f88d30
refs/heads/main
2023-07-21T11:54:16.357028
2021-08-21T01:39:23
2021-08-21T01:39:23
398,437,363
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6116838455200195, "alphanum_fraction": 0.6288659572601318, "avg_line_length": 19.714284896850586, "blob_id": "5d94d1960b1158fc8a9b7cc17c6d135d56bae7cd", "content_id": "f2defb7b4b87ad8707f65f60c0e8c0110075be60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 291, "license_type": "no_license", "max_line_length": 51, "num_lines": 14, "path": "/Task1.py", "repo_name": "mek05/Task-1", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nr = float(input(\"input the radius of the circle:\"))\nA = r*r*3.14\nprint(\"Area of the circle with radius\",r,\"is\",A)\n\nfn = input(\"Enter filename:\")\nf = fn.split(\".\")\nprint(\"Extension of the file name is:\"+f[-1])\n " } ]
1
cristobaltapia/dotfiles_main
https://github.com/cristobaltapia/dotfiles_main
cf68264d211864a051f55cdcfaf702c0d1cf2a3c
cb3453193da52c5bf2cded826a4228edb3062571
c3c4f2e3e406822a4ba57da2186aacea643fe924
refs/heads/master
2023-09-04T11:51:31.146429
2023-08-29T22:23:20
2023-08-29T22:23:20
63,482,051
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.4781915545463562, "alphanum_fraction": 0.4798997938632965, "avg_line_length": 27.69607925415039, "blob_id": "592c41e9f564d2507a9bb6aa8505c000c659a9eb", "content_id": "d9008c7d4473824e435fbc8444ffb3f085a27ca9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 8781, "license_type": "no_license", "max_line_length": 107, "num_lines": 306, "path": "/home/.config/nvim/lua/plugins/utils.lua", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "-- Define hints for hydra\nlocal hint_diagram = [[\n Arrow^^^^^^ Select region with <C-v>\n ^ ^ _K_ ^ ^ _f_: surround it with box\n _H_ ^ ^ _L_\n ^ ^ _J_ ^ ^ _q_\n]]\n\nlocal hint_git = [[\n _J_: next hunk _s_: stage hunk _d_: show deleted _b_: blame line\n _K_: prev hunk _u_: undo last stage _p_: preview hunk _B_: blame show full\n ^ ^ _S_: stage buffer ^ ^ _/_: show base file\n ^\n ^ ^ _<Enter>_: Fugitive _q_: exit\n]]\n\nlocal hint_ltex = [[\n ^^ Select Language for Ltex ^\n^ ^\n _e_: English (American) ^\n _d_: German (Germany) ^\n _s_: Spanish\n^ ^\n _q_: Exit\n]]\n\nreturn {\n -- Undo history\n {\n 'mbbill/undotree',\n keys = {\n { \"<F3>\", \"<cmd>UndotreeToggle<cr>\", \"n\" },\n },\n config = function()\n vim.g.undotree_WindowLayout = 2\n vim.g.undotree_SetFocusWhenToggle = true\n end\n },\n -- Web devicons\n { \"nvim-tree/nvim-web-devicons\", name = \"web-devicons\" },\n -- Better ui elements for nvim\n { 'stevearc/dressing.nvim' },\n -- Char-based diff\n {\n 'rickhowe/diffchar.vim',\n ft = { 'markdown', 'markdown.pandoc', 'tex' }\n },\n {\n 'b3nj5m1n/kommentary',\n branch = 'main',\n config = function()\n require('kommentary.config').configure_language(\"julia\", {\n single_line_comment_string = \"#\",\n prefer_single_line_comments = true,\n })\n require('kommentary.config').configure_language(\"lua\", {\n prefer_single_line_comments = true,\n })\n end\n },\n -- Better movements\n {\n 'ggandor/leap.nvim',\n keys = {\n { \"s\", \"<Plug>(leap-forward-to)\", \"n\", { silent = true } },\n { \"S\", \"<Plug>(leap-backward-to)\", \"n\", { silent = true } },\n },\n config = function()\n vim.api.nvim_set_hl(0, 'LeapBackdrop', { link = 'Comment' })\n end\n },\n -- Hydra\n {\n 'anuvyklack/hydra.nvim',\n name = \"hydra\",\n lazy = true,\n keys = {\n \"<leader>g\", \"<leader>hd\", \"<leader>hl\",\n },\n config = function(_, opts)\n local Hydra = require('hydra')\n\n Hydra({\n name = 'Draw Diagram',\n hint = hint_diagram,\n config = {\n color = 'pink',\n invoke_on_body = true,\n hint = {\n border = 'rounded'\n },\n on_enter = function()\n vim.o.virtualedit = 'all'\n end,\n },\n mode = 'n',\n body = '<leader>hd',\n heads = {\n { 'H', '<C-v>h:VBox<CR>' },\n { 'J', '<C-v>j:VBox<CR>' },\n { 'K', '<C-v>k:VBox<CR>' },\n { 'L', '<C-v>l:VBox<CR>' },\n { 'f', ':VBox<CR>', { mode = 'v' } },\n { 'q', nil, { exit = true } },\n }\n })\n\n local gitsigns = require('gitsigns')\n\n Hydra({\n name = 'Git',\n hint = hint_git,\n config = {\n buffer = bufnr,\n color = 'pink',\n invoke_on_body = true,\n hint = {\n border = 'rounded'\n },\n on_enter = function()\n vim.cmd 'mkview'\n vim.cmd 'silent! 
%foldopen!'\n vim.bo.modifiable = false\n gitsigns.toggle_signs(true)\n gitsigns.toggle_linehl(true)\n gitsigns.toggle_word_diff(true)\n end,\n on_exit = function()\n local cursor_pos = vim.api.nvim_win_get_cursor(0)\n vim.cmd 'loadview'\n vim.api.nvim_win_set_cursor(0, cursor_pos)\n vim.cmd 'normal zv'\n gitsigns.toggle_signs(true)\n gitsigns.toggle_linehl(false)\n gitsigns.toggle_deleted(false)\n gitsigns.toggle_word_diff(false)\n end,\n },\n mode = { 'n', 'x' },\n body = '<leader>g',\n heads = {\n { 'J',\n function()\n if vim.wo.diff then return ']c' end\n vim.schedule(function() gitsigns.next_hunk() end)\n return '<Ignore>'\n end,\n { expr = true, desc = 'next hunk' } },\n { 'K',\n function()\n if vim.wo.diff then return '[c' end\n vim.schedule(function() gitsigns.prev_hunk() end)\n return '<Ignore>'\n end,\n { expr = true, desc = 'prev hunk' } },\n { 's', ':Gitsigns stage_hunk<CR>', { silent = true, desc = 'stage hunk' } },\n { 'u', gitsigns.undo_stage_hunk, { desc = 'undo last stage' } },\n { 'S', gitsigns.stage_buffer, { desc = 'stage buffer' } },\n { 'p', gitsigns.preview_hunk, { desc = 'preview hunk' } },\n { 'd', gitsigns.toggle_deleted, {\n nowait = true,\n desc = 'toggle deleted'\n } },\n { 'b', gitsigns.blame_line, { desc = 'blame' } },\n { 'B', function() gitsigns.blame_line { full = true } end, { desc = 'blame show full' } },\n { '/', gitsigns.show, {\n exit = true,\n desc = 'show base file'\n } }, -- show the base of the file\n { '<Enter>', '<Cmd>Git<CR>', { exit = true, desc = 'Fugitive' } },\n { 'q', nil, {\n exit = true,\n nowait = true,\n desc = 'exit'\n } },\n }\n })\n\n -- Change language for ltex\n Hydra({\n name = 'Change Ltex language',\n hint = hint_ltex,\n config = {\n color = 'pink',\n invoke_on_body = true,\n hint = {\n border = 'rounded'\n },\n on_enter = function()\n vim.o.virtualedit = 'all'\n end,\n },\n mode = 'n',\n body = '<leader>hl',\n heads = {\n { 'e', function()\n require(\"lspconfig\").ltex.setup({ settings = { ltex = { language = \"en-US\" } } })\n vim.opt.spelllang = \"en_us\"\n end },\n { 'd', function()\n require(\"lspconfig\").ltex.setup({ settings = { ltex = { language = \"de-DE\" } } })\n vim.opt.spelllang = \"de_de\"\n end },\n { 's', function()\n require(\"lspconfig\").ltex.setup({ settings = { ltex = { language = \"es\" } } })\n vim.opt.spelllang = \"es\"\n end },\n { 'q', nil, { exit = true } },\n }\n })\n end\n },\n -- Docstrings generator\n {\n \"danymat/neogen\",\n keys = {\n { \"<leader>ds\", \"<cmd>Neogen func<cr>\", desc = \"Generate func docstrings\" },\n { \"<leader>dc\", \"<cmd>Neogen class<cr>\", desc = \"Generate class docstrings\" },\n },\n opts = {\n enabled = true,\n input_after_comment = true,\n languages = {\n python = {\n template = {\n annotation_convention = 'numpydoc',\n }\n }\n }\n },\n },\n -- Surround movements\n { 'tpope/vim-surround' },\n -- Close pairs of parentheses, quotes, etc\n {\n 'windwp/nvim-autopairs',\n config = true,\n },\n -- Different nice improvements for neovim\n {\n 'echasnovski/mini.nvim',\n branch = 'main',\n event = { \"BufReadPre\", \"BufNewFile\" },\n config = function()\n local animate = require('mini.animate')\n require('mini.trailspace').setup()\n require('mini.indentscope').setup()\n require('mini.animate').setup({\n cursor = { enable = false },\n resize = { enable = true },\n scroll = { enable = false, timing = animate.gen_timing.linear({ duration = 150, unit = 'total' }) }\n })\n end\n },\n -- Support for openning GNUGP excrypted files\n { 'jamessan/vim-gnupg' },\n -- Draw ascii 
diagrams\n {\n 'jbyuki/venn.nvim',\n dependencies = { \"hydra\" },\n },\n -- Snippets\n {\n 'SirVer/ultisnips',\n dependencies = {\n { 'honza/vim-snippets' },\n { 'cristobaltapia/MySnippets' }\n },\n config = function()\n vim.g.UltiSnipsSnippetDirectories = {\n 'UltiSnips',\n vim.env.HOME .. '/.local/share/nvim/lazy/MySnippets/Ultisnips',\n vim.env.HOME .. '/Templates/ultisnips-templates'\n }\n\n -- Set the smart function definition to use numpy style for docstrings\n vim.g.ultisnips_python_style = \"numpy\"\n vim.g.UltisnipsUsePythonVersion = 3\n end\n },\n -- Follow symlinks\n {\n 'aymericbeaumet/vim-symlink',\n -- 'Jasha10/vim-symlink',\n dependencies = { 'moll/vim-bbye' },\n },\n {\n \"jackMort/ChatGPT.nvim\",\n cmd = { \"ChatGPT\", \"ChatGPTRun\", \"ChatGPTEditWithInstructions\" },\n config = function()\n local home = vim.fn.expand(\"$HOME\")\n require(\"chatgpt\").setup(\n {\n api_key_cmd = \"cat \" .. home .. \"/.config/chatgpt/api\",\n actions_paths = { vim.env.HOME .. \"/.config/chatgpt/actions.json\" }\n }\n )\n end,\n dependencies = {\n \"MunifTanjim/nui.nvim\",\n \"nvim-lua/plenary.nvim\",\n \"nvim-telescope/telescope.nvim\"\n }\n }\n}\n-- vim: set shiftwidth=2:\n" }, { "alpha_fraction": 0.5131086111068726, "alphanum_fraction": 0.5243445634841919, "avg_line_length": 27.60714340209961, "blob_id": "ceb9137fe175750b24eb8e38e413bd356c672af2", "content_id": "5058a703e4dbe83093d8df3fa8d0da9956029f19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 801, "license_type": "no_license", "max_line_length": 66, "num_lines": 28, "path": "/home/.local/bin/fexdox-completion.bash", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\n_fexdox_completions()\n{\n # Consider account ID if given\n options=\"\"\n for IDX in ${!COMP_WORDS[@]}; do\n if [[ ${COMP_WORDS[$IDX]} = \"-i\" ]]; then\n # Get username\n options=\"-i ${COMP_WORDS[$(($IDX+1))]}\"\n fi\n done\n case ${COMP_WORDS[-2]} in\n -s )\n COMPREPLY=($(compgen -o dirnames \"${COMP_WORDS[-1]}\"));;\n -r )\n remotes=$(fexdox ${options} -l | awk '{print $5}')\n COMPREPLY=($(compgen -W \"${remotes}\" \"${COMP_WORDS[-1]}\"));;\n -D )\n remotes=$(fexdox ${options} -l | awk '{print $5}')\n COMPREPLY=($(compgen -W \"${remotes}\" \"${COMP_WORDS[-1]}\"));;\n -l )\n remotes=$(fexdox ${options} -l | awk '{print $5}')\n COMPREPLY=($(compgen -W \"${remotes}\" \"${COMP_WORDS[-1]}\"));;\n esac\n}\n\ncomplete -F _fexdox_completions fexdox\n" }, { "alpha_fraction": 0.6847977638244629, "alphanum_fraction": 0.6908414959907532, "avg_line_length": 21.882978439331055, "blob_id": "8f3cd6d221683cff0ccdb47cb41d5471285099d5", "content_id": "bdf08258cc1befccc1c5f7f815c42c001e27e411", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 2154, "license_type": "no_license", "max_line_length": 79, "num_lines": 94, "path": "/home/.config/nvim/lua/tapia/set.lua", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "vim.g.python3_host_prog = vim.env.HOME .. 
'/.virtualenvs/py3neovim/bin/python3'\n\nvim.opt.title = true\nvim.opt.titlestring = \"%t %y\"\nvim.opt.autoread = true\nvim.opt.swapfile = false\nvim.opt.backup = false\nvim.opt.encoding = \"utf-8\"\nvim.opt.cursorline = true\n\n-- Line numbers\nvim.opt.nu = true\nvim.opt.relativenumber = true\n\n-- Lineabraks with indentation\nvim.opt.linebreak = true\nvim.opt.breakindent = true\nvim.opt.showbreak = \"󱞩 \"\nvim.opt.wrap = true\n\n-- Change buffer without saving\nvim.opt.hidden = true\n\n-- Atomatically change current working directory to the directory\n-- containing the file in the buffer\nvim.opt.autochdir = true\n\n-- Use spaces to replace tabs\nvim.opt.expandtab = true\n\n-- Allow virtual editing in visual block\nvim.opt.virtualedit = \"block\"\n\n-- Ignore certain files\nvim.opt.wildignore = { \"*.swp\", \"*.bak\", \"*.pyc\", \"*.class\", \"*.aux\", \"*.toc\" }\n\n-- Always show status bar\nvim.opt.laststatus = 2\n\nvim.opt.hlsearch = false\nvim.opt.incsearch = true\n\nvim.opt.termguicolors = true\n\nvim.opt.scrolloff = 8\nvim.opt.signcolumn = \"yes\"\nvim.opt.isfname:append(\"@-@\")\n\nvim.opt.diffopt:append(\"vertical\")\nvim.opt.diffopt:append(\"linematch:60\")\n\nvim.opt.shada:append('r/mnt/intcdc')\n\nvim.g.mapleader = \"\\\\\"\n\n-- Visual selection copies to the clipboard\nvim.opt.clipboard:append('unnamedplus')\n\nvim.g.completion_sorting = \"none\"\nvim.g.completeopt = \"menu,menuone,noinsert\"\n\n-- Size of the completion menu\nvim.opt.pumheight = 15\n\n-- Set python syntax for Freecad macros\nvim.api.nvim_create_autocmd({ \"BufEnter\", \"BufNewFile\" }, {\n pattern = { \"*.fcmacro\" },\n callback = function()\n vim.opt.filetype = \"python\"\n end\n})\n-- Set tex syntax for pdf_tex files\nvim.api.nvim_create_autocmd({ \"BufEnter\", \"BufNewFile\" }, {\n pattern = { \"*.pdf_tex\" },\n callback = function()\n vim.opt.filetype = \"tex\"\n end\n})\n\n-- dont list quickfix buffers\nvim.api.nvim_create_autocmd(\"FileType\", {\n pattern = \"qf\",\n callback = function()\n vim.opt_local.buflisted = false\n end,\n})\n\n-- Disable editorconfig\nvim.g.editorconfig = false\n\n-- Set default tabsize\nvim.opt.softtabstop = 4\nvim.opt.shiftwidth = 4\nvim.opt.tabstop = 4\n" }, { "alpha_fraction": 0.5100317001342773, "alphanum_fraction": 0.5129883885383606, "avg_line_length": 29.54838752746582, "blob_id": "d2dc02ee35d8bbfb8fe6ca3dddc9d8c5a64ebf3d", "content_id": "427158cbffcb6a0e630d5c689ac21a9bff965bed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 9474, "license_type": "no_license", "max_line_length": 106, "num_lines": 310, "path": "/home/.config/nvim/lua/plugins/lsp.lua", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "return {\n -- Lsp-connfig\n {\n 'neovim/nvim-lspconfig',\n dependencies = {\n { \"folke/neodev.nvim\" },\n { \"williamboman/mason-lspconfig.nvim\" },\n { \"nvim-lua/plenary.nvim\" },\n { \"ray-x/lsp_signature.nvim\" },\n },\n event = { \"BufReadPre\", \"BufNewFile\" },\n opts = {\n -- options for vim.diagnostic.config()\n diagnostics = {\n underline = true,\n update_in_insert = false,\n virtual_text = {\n spacing = 4,\n source = \"if_many\",\n prefix = \"●\",\n -- this will set set the prefix to a function that returns\n -- the diagnostics icon based on the severity this only\n -- works on a recent 0.10.0 build. 
Will be set to \"●\" when\n -- not supported prefix = \"icons\",\n },\n severity_sort = true,\n },\n -- Enable this to enable the builtin LSP inlay hints on Neovim >=\n -- 0.10.0 Be aware that you also will need to properly configure\n -- your LSP server to provide the inlay hints.\n inlay_hints = {\n enabled = false,\n },\n -- add any global capabilities here\n capabilities = {},\n -- Automatically format on save\n autoformat = false,\n },\n config = function(_, opts)\n local Path = require(\"plenary.path\")\n require(\"neodev\").setup { lspconfig = true, }\n\n local lspconfig = require('lspconfig')\n local lsp_defaults = lspconfig.util.default_config\n\n lsp_defaults.capabilities = vim.tbl_deep_extend(\n 'force',\n lsp_defaults.capabilities,\n require('cmp_nvim_lsp').default_capabilities()\n )\n\n vim.api.nvim_create_autocmd('LspAttach', {\n desc = 'LSP actions',\n callback = function(event)\n -- Define current bufnr\n local bufnr = event.buf\n local map = function(m, lhs, rhs)\n local options = { buffer = bufnr, remap = false }\n vim.keymap.set(m, lhs, rhs, options)\n end\n\n -- LSP actions\n map('n', 'K', vim.lsp.buf.hover)\n map('n', 'gd', vim.lsp.buf.definition)\n map('n', 'gD', vim.lsp.buf.declaration)\n map('n', 'gi', vim.lsp.buf.implementation)\n map('n', 'go', vim.lsp.buf.type_definition)\n map('n', 'gr', vim.lsp.buf.references)\n map('n', 'gs', vim.lsp.buf.signature_help)\n map('n', '<leader>rn', vim.lsp.buf.rename)\n map({ 'n', 'x' }, 'gq', function() vim.lsp.buf.format({ async = true }) end)\n -- Format selected code only\n vim.keymap.set('v', 'gq', function()\n vim.lsp.buf.format({\n async = true,\n timeout_ms = 10000,\n range = { vim.fn.getpos('v'), vim.fn.getcurpos() },\n })\n end)\n map('n', '<leader>ca', vim.lsp.buf.code_action)\n map('x', '<leader>ca', '<cmd>lua vim.lsp.buf.range_code_action()<cr>')\n\n -- Diagnostics\n map('n', 'ge', vim.diagnostic.open_float)\n map('n', '[d', vim.diagnostic.goto_prev)\n map('n', ']d', vim.diagnostic.goto_next)\n end\n })\n\n local function lsp_settings()\n vim.diagnostic.config({\n severity_sort = true,\n float = { border = 'rounded' },\n })\n\n vim.lsp.handlers['textDocument/hover'] = vim.lsp.with(\n vim.lsp.handlers.hover,\n { border = 'rounded' }\n )\n\n vim.lsp.handlers['textDocument/signatureHelp'] = vim.lsp.with(\n vim.lsp.handlers.signature_help,\n { border = 'rounded' }\n )\n\n local command = vim.api.nvim_create_user_command\n\n command('LspWorkspaceAdd', function()\n vim.lsp.buf.add_workspace_folder()\n end, { desc = 'Add folder to workspace' })\n\n command('LspWorkspaceList', function()\n vim.notify(vim.inspect(vim.lsp.buf.list_workspace_folders()))\n end, { desc = 'List workspace folders' })\n\n command('LspWorkspaceRemove', function()\n vim.lsp.buf.remove_workspace_folder()\n end, { desc = 'Remove folder from workspace' })\n end\n\n lsp_settings()\n\n require('mason').setup({})\n\n require('mason-lspconfig').setup({\n ensure_installed = {\n 'bashls',\n 'cssls',\n 'docker_compose_language_service',\n 'dockerls',\n 'efm',\n 'eslint',\n 'fortls',\n 'jsonls',\n 'julials',\n 'ltex',\n 'lua_ls',\n 'marksman',\n 'pyright',\n 'ruff_lsp',\n 'taplo',\n 'tsserver',\n 'typst_lsp',\n }\n })\n\n -- Configure Lua-ls\n lspconfig.lua_ls.setup {\n settings = {\n Lua = {\n -- Disable telemetry\n telemetry = { enable = false },\n runtime = {\n -- Tell the language server which version of Lua you're using\n -- (most likely LuaJIT in the case of Neovim)\n version = 'LuaJIT',\n path = runtime_path,\n },\n diagnostics = {\n -- Get the language 
server to recognize the `vim` global\n globals = { 'vim' }\n },\n workspace = {\n checkThirdParty = false,\n library = {\n -- Make the server aware of Neovim runtime files\n vim.fn.expand('$VIMRUNTIME/lua'),\n vim.fn.stdpath('config') .. '/lua'\n }\n }\n }\n }\n }\n\n -- Python LSP\n local capabilities = vim.lsp.protocol.make_client_capabilities()\n capabilities.textDocument.publishDiagnostics.tagSupport.valueSet = { 2 }\n lspconfig.pyright.setup {\n settings = {\n python = {\n analysis = {\n autoSearchPaths = true,\n disableOrganizeImports = true,\n useLibraryCodeForTypes = true,\n diagnosticMode = 'workspace',\n autoImportCompletions = false,\n }\n }\n }\n }\n lspconfig.ruff_lsp.setup {}\n -- Typst\n lspconfig.typst_lsp.setup {\n single_file_support = true,\n }\n -- Julia\n lspconfig.julials.setup {}\n -- Latex\n -- lspconfig.texlab.setup {}\n -- Toml\n lspconfig.taplo.setup {}\n -- Docker\n lspconfig.docker_compose_language_service.setup {}\n lspconfig.dockerls.setup {\n on_attach = function(client, bufnr)\n -- Remove syntax from LSP\n client.server_capabilities.semanticTokensProvider = nil\n end,\n }\n -- CSS\n lspconfig.cssls.setup {}\n -- Grammar correctoin using ltex-ls\n local ltex_setup = {\n filetypes = { \"bib\", \"gitcommit\", \"markdown\", \"org\", \"plaintex\", \"rst\", \"rnoweb\", \"tex\", \"pandoc\",\n \"typst\" },\n settings = {\n ltex = {\n language = \"en-US\",\n },\n },\n }\n -- For ltex-ls under archlinux I have to use the system installation, but\n -- for other systems (e.g. Ubuntu) the default cmd works good.\n if Path:new(\"/usr/bin/ltex-ls\"):is_file() then\n ltex_setup[\"cmd\"] = { \"/usr/bin/ltex-ls\" }\n end\n lspconfig.ltex.setup(ltex_setup)\n lspconfig.fortls.setup {}\n -- JSON-ls\n lspconfig.jsonls.setup {}\n\n -- Define formatting for different filetypes\n local texFormatter = 'latexindent --modifylinebreaks -y=\"defaultIndent: \\' \\'\"'\n lspconfig.efm.setup {\n flags = {\n debounce_text_changes = 150,\n },\n init_options = { documentFormatting = true },\n filetypes = { \"python\", \"bib\", \"tex\", \"sty\", \"cls\", \"fortran\" },\n settings = {\n rootMarkers = { \".git/\" },\n languages = {\n python = {\n { formatCommand = \"yapf\", formatStdin = true }\n },\n bib = {\n {\n formatCommand = \"bibtex-tidy --v2 --curly --align=14 --no-escape --sort-fields --sort\",\n formatStdin = true\n }\n },\n fortran = {\n {\n formatCommand = \"findent --continuation=0 --input_format=fixed --indent_procedure=0\",\n formatStdin = true\n }\n },\n tex = { { formatCommand = texFormatter, formatStdin = true } },\n sty = { { formatCommand = texFormatter, formatStdin = true } },\n cls = { { formatCommand = texFormatter, formatStdin = true } },\n }\n }\n }\n\n\n -- Don't show diagnostics in-line\n vim.diagnostic.config({ virtual_text = false })\n\n -- Increase update frequency of the ui\n vim.opt.updatetime = 500\n end\n },\n {\n \"folke/neodev.nvim\",\n opts = {\n lspconfig = true\n }\n },\n -- Mason\n {\n 'williamboman/mason-lspconfig.nvim',\n name = \"mason\",\n dependencies = {\n {\n 'williamboman/mason.nvim',\n build = function()\n pcall(vim.cmd, 'MasonUpdate')\n end,\n },\n }\n },\n -- Function signatures\n {\n \"ray-x/lsp_signature.nvim\",\n lazy = true,\n opts = {\n bind = true, -- This is mandatory, otherwise border config won't get registered.\n hint_enable = true,\n doc_lines = 0,\n floating_window = false,\n toggle_key = \"<C-h>\",\n select_signature_key = \"<C-l>\",\n floating_window_above_cur_line = true,\n handler_opts = {\n border = \"rounded\"\n }\n }\n 
},\n}\n-- vim: set shiftwidth=2:\n" }, { "alpha_fraction": 0.7234042286872864, "alphanum_fraction": 0.7446808218955994, "avg_line_length": 30.33333396911621, "blob_id": "fb4ca4e1d292d756ca84e64d04d66d6043430802", "content_id": "acb7a1eec3bee81c1c093996b459db545d865319", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 94, "license_type": "no_license", "max_line_length": 37, "num_lines": 3, "path": "/home/.config/nvim/after/ftplugin/mail.lua", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "vim.opt_local.textwidth = 80\nvim.opt_local.formatoptions = \"tcqj]\"\nvim.opt_local.spell = true\n" }, { "alpha_fraction": 0.6006628274917603, "alphanum_fraction": 0.6043910384178162, "avg_line_length": 26.12359619140625, "blob_id": "facca32930dad02906f1c7450a1cac5901dad4a5", "content_id": "c0f50c59c299df4def9eb794310c110784633bec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2414, "license_type": "no_license", "max_line_length": 82, "num_lines": 89, "path": "/home/.config/papis/scripts/papis-bibfile", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# papis-short-help: Convert zotero library to papis library\n# Copyright 2017 Alejandro Gallo. GPLv3\nimport os\nimport sys\n\nfrom bibtexparser.bparser import BibTexParser\n\nimport papis.api\nimport papis.bibtex\nimport papis.commands\nimport papis.commands.add as papis_add\nimport papis.config\nimport papis.utils\n\nimport argparse\n\npars = argparse.ArgumentParser(description=\"Import bibfile to papis library\")\npars.add_argument(\"bibfile\", type=str)\npars.add_argument(\"--lib\", type=str)\n\nargumetns = pars.parse_args()\nbib_file = argumetns.bibfile\n\n\ndef usage():\n print(\"Usage: papis bibfile bibfile --lib\")\n\n\nif not os.path.exists(bib_file):\n print(\"ERROR: Bib file (%s) not found!\" % bib_file)\n usage()\n sys.exit(1)\n\nparser = BibTexParser()\nparser.ignore_nonstandard_types = False\n\n# entries = parser.parse(bib_file)\nwith open(bib_file) as bibtex_file:\n bib_obj = parser.parse_file(bibtex_file)\n\nentries = bib_obj.entries\n\n\ndef add(ref_data, pdf=None):\n\n if pdf_file is not None:\n papis_add.run(paths=[pdf], data=ref_data, git=True)\n else:\n papis_add.run(paths=[], data=ref_data, git=True)\n\n\ncwd = os.getcwd()\n\n# Set the library\nif argumetns.lib:\n papis.api.set_lib_from_name(argumetns.lib)\n print(f\"library {argumetns.lib}\")\n\nfor entry in entries:\n for basic_field in [\"ref\", \"author\", \"title\"]:\n if basic_field not in entry.keys():\n if basic_field == \"ref\":\n entry[basic_field] = entry[\"ID\"]\n elif basic_field == \"author\":\n entry[basic_field] = entry[\"editor\"]\n else:\n entry[basic_field] = papis.utils.input(\n \"%s Not found, please insert\" % basic_field, default=\"???????\"\n )\n\n print(\"INFO: Processing | bibkey: %s\" % entry.get(\"ref\"))\n print(\" |Author: %s\" % entry.get(\"author\"))\n print(\" | Title: %s\" % entry.get(\"title\"))\n print(\" |\")\n\n pdf_file = None\n\n if \"file\" in entry.keys():\n pdf_file = entry.get(\"file\").split(\":\")[1]\n pdf_file = os.path.join(cwd, pdf_file)\n print(\"\\tINFO: File field detected (%s)\" % pdf_file)\n if not os.path.exists(pdf_file):\n print(\"\\tWARNING: Path (%s) not found! 
Ignoring it\" % pdf_file)\n del entry[\"file\"]\n pdf_file = None\n\n add(entry, pdf_file)\n" }, { "alpha_fraction": 0.6992481350898743, "alphanum_fraction": 0.7368420958518982, "avg_line_length": 21.16666603088379, "blob_id": "4e4df0649408f18820f17e86bf69aa5d79d14aa6", "content_id": "6afa722f5da5753c611b9fad63ecfb5c187a140f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 133, "license_type": "no_license", "max_line_length": 35, "num_lines": 6, "path": "/home/.config/nvim/after/ftplugin/python.lua", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "vim.opt_local.textwidth = 80\nvim.opt_local.formatoptions = \"cqj\"\n\nvim.opt.softtabstop = 4\nvim.opt.shiftwidth = 4\nvim.opt.tabstop = 4\n" }, { "alpha_fraction": 0.5513626933097839, "alphanum_fraction": 0.552410900592804, "avg_line_length": 27.058822631835938, "blob_id": "8544ffd5f42c268f26b4c230e859cb920560ea93", "content_id": "974559d32a09044897b66133b4f19f3e6fa86078", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 954, "license_type": "no_license", "max_line_length": 80, "num_lines": 34, "path": "/home/.config/nvim/lua/plugins/markdown.lua", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "return {\n {\n 'vim-pandoc/vim-pandoc',\n ft = { 'markdown', 'markdown.pandoc', 'pandoc' },\n dependencies = {\n { 'vim-pandoc/vim-pandoc-syntax' },\n { 'godlygeek/tabular' },\n {\n \"iamcco/markdown-preview.nvim\",\n build = function() vim.fn[\"mkdp#util#install\"]() end,\n ft = { 'markdown', 'pandoc', 'markdown.pandoc' }\n }\n },\n config = function()\n vim.cmd(\n [[\n function! g:Open_browser(url)\n \"silent exe 'silent !gnome-www-browser --private-instance ' . a:url . \" &\"\n silent exe 'silent !epiphany --private-instance ' . a:url . \" &\"\n endfunction\n ]]\n )\n\n --[[ local function open_browser(url)\n io.popen(\"epiphany --private-instance \" .. url .. \" &\")\nend ]]\n vim.g.mkdp_browserfunc = 'g:Open_browser'\n\n vim.g.mkdp_filetypes = { 'pandoc', 'markdown', 'markdown.pandoc', 'wiki' }\n vim.g.mkdp_auto_close = false\n end\n },\n}\n-- vim: set shiftwidth=2:\n" }, { "alpha_fraction": 0.6549249887466431, "alphanum_fraction": 0.6640573740005493, "avg_line_length": 30.89583396911621, "blob_id": "a2ab559d1ad8a1bd5739d6cd36790f6f4152f0c8", "content_id": "ca7e79b491dc60278ebbea9d2e62ed94d8c1dc23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1533, "license_type": "no_license", "max_line_length": 78, "num_lines": 48, "path": "/home/.password-store/.extensions/get_pass.bash", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n# pass get - Password Store Extension (https://www.passwordstore.org/)\n# Copyright (C) 2020 James Patrick\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n\n\nPASSWORD_STORE_DIR=\"${PASSWORD_STORE_DIR:-$HOME/.password-store}\"\n\ncmd_get_pass_usage() {\n cat <<-_EOF\nUsage:\n $PROGRAM get_pass pass_name\n Grabs the pass based on rules for Browserpass.\n 1. match \"key:value\" where key, either pass, password, or secret\n 2. if no match is present default to the first line.\n\n https://github.com/browserpass/browserpass-extension\n\n Options:\n Nada.\n_EOF\n exit 0\n}\n\n\ncmd_get_user(){\n local pass_name=\"$1\" val backup\n val=$(pass get \"(pass|password|secret)\" \"$pass_name\")\n backup=$(cmd_show \"$pass_name\" | head -1) \n echo \"${val:-$backup}\"\n}\n\n[[ \"$1\" == \"help\" || \"$1\" == \"--help\" || \"$1\" == \"-h\" ]] && cmd_get_user_usage\n[[ \"$#\" != \"1\" ]] && cmd_get_user_usage\ncmd_get_user \"$@\"\n\n\n" }, { "alpha_fraction": 0.7411003112792969, "alphanum_fraction": 0.8155339956283569, "avg_line_length": 13.714285850524902, "blob_id": "e9a9e759c0b3591e8eabed79ba308020ce779bfe", "content_id": "1954d83060778a359cd5254f385168007e2d6bcb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 309, "license_type": "no_license", "max_line_length": 42, "num_lines": 21, "path": "/home/.config/fuzzel/fuzzel.ini", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "[main]\nfont=Inter Nerd Font:size=14:weight=medium\nicon-theme=Paper\ndpi-aware=auto\n#fields=filename,name,generic\nterminal=foot\nfuzzy=yes\ninner-pad=20\nline-height=25\n\n[colors]\nbackground=3b4252dd\ntext=d8dee9ff\nselection=5e81acff\nselection-text=eceff4ff\nmatch=ebcb8bff\nborder=5e81acff\n\n[border]\nwidth=4\nradius=4\n" }, { "alpha_fraction": 0.5931363105773926, "alphanum_fraction": 0.6252727508544922, "avg_line_length": 27.32022476196289, "blob_id": "6ae5771370cae3326ba63dc3ed2e3916d9460527", "content_id": "e7dd3f1e0bce57432e1229f213a93272edf174b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 5041, "license_type": "no_license", "max_line_length": 192, "num_lines": 178, "path": "/home/.bashrc", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "#\n# ~/.bashrc\n#\n\n# If not running interactively, don't do anything\n[[ $- != *i* ]] && return\n\n# get current branch in git repo\nfunction parse_git_branch() {\n\tBRANCH=`git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \\(.*\\)/\\1/'`\n\tif [ ! 
\"${BRANCH}\" == \"\" ]\n\tthen\n\t\tSTAT=`parse_git_dirty`\n\t\techo \"[${BRANCH}${STAT}]\"\n\telse\n\t\techo \"\"\n\tfi\n}\n\n# get current status of git repo\nfunction parse_git_dirty {\n\tstatus=`git status 2>&1 | tee`\n\tdirty=`echo -n \"${status}\" 2> /dev/null | grep \"modified:\" &> /dev/null; echo \"$?\"`\n\tuntracked=`echo -n \"${status}\" 2> /dev/null | grep \"Untracked files\" &> /dev/null; echo \"$?\"`\n\tahead=`echo -n \"${status}\" 2> /dev/null | grep \"Your branch is ahead of\" &> /dev/null; echo \"$?\"`\n\tnewfile=`echo -n \"${status}\" 2> /dev/null | grep \"new file:\" &> /dev/null; echo \"$?\"`\n\trenamed=`echo -n \"${status}\" 2> /dev/null | grep \"renamed:\" &> /dev/null; echo \"$?\"`\n\tdeleted=`echo -n \"${status}\" 2> /dev/null | grep \"deleted:\" &> /dev/null; echo \"$?\"`\n\tbits=''\n\tif [ \"${renamed}\" == \"0\" ]; then\n\t\tbits=\">${bits}\"\n\tfi\n\tif [ \"${ahead}\" == \"0\" ]; then\n\t\tbits=\"*${bits}\"\n\tfi\n\tif [ \"${newfile}\" == \"0\" ]; then\n\t\tbits=\"+${bits}\"\n\tfi\n\tif [ \"${untracked}\" == \"0\" ]; then\n\t\tbits=\"?${bits}\"\n\tfi\n\tif [ \"${deleted}\" == \"0\" ]; then\n\t\tbits=\"x${bits}\"\n\tfi\n\tif [ \"${dirty}\" == \"0\" ]; then\n\t\tbits=\"!${bits}\"\n\tfi\n\tif [ ! \"${bits}\" == \"\" ]; then\n\t\techo \" ${bits}\"\n\telse\n\t\techo \"\"\n\tfi\n}\n\nexport PS1=\"[\\[\\e[32m\\]\\u\\[\\e[m\\]\\[\\e[32m\\]@\\[\\e[m\\]\\[\\e[32m\\]\\h\\[\\e[m\\]]:\\[\\e[36m\\]\\W\\[\\e[m\\]\\[\\e[31m\\]\\`parse_git_branch\\`\\[\\e[m\\]$ \"\n\n# Set language for the console\nexport LANG=\"en_US.UTF-8\"\n\nexport EDITORCMD=\"nvim-qt\"\nexport EDITOR=\"nvim\"\n\n# check the window size after each command and, if necessary,\n# update the values of LINES and COLUMNS\nshopt -s checkwinsize\n\n# Virtual Environment\nexport WORKON_HOME=~/.virtualenvs\nsource /usr/bin/virtualenvwrapper.sh\n\nsource \"$HOME/.homesick/repos/homeshick/homeshick.sh\"\nsource \"$HOME/.homesick/repos/homeshick/completions/homeshick-completion.bash\"\n\n# if [ $TILIX_ID ] || [ $VTE_VERSION ]; then\n# source /etc/profile.d/vte.sh\n# fi\n\n# F*EX\nexport PATH=\"$HOME/.fex/bin:$PATH\"\nsource $HOME/.local/bin/fexdox-completion.bash\n\neval $(dircolors ~/.dircolors)\n\nexport PATH=\"$HOME/bin:$PATH\"\n\n# Pyenv\nexport PYENV_ROOT=\"$HOME/.pyenv\"\nexport PATH=\"$PYENV_ROOT/bin:$PATH\"\neval \"$(pyenv init --path)\"\nif command -v pyenv 1>/dev/null 2>&1; then\n eval \"$(pyenv init -)\"\nfi\n\nexport PATH=\"$HOME/.local/bin:$PATH\"\n\n# Enable tab-completion for directories after variables\nshopt -s direxpand\n\n# br\n# source /home/tapia/.config/broot/launcher/bash/br\nalias br=broot\n\n# Generic colourizer\n[[ -s \"/etc/profile.d/grc.bashrc\" ]] && source /etc/profile.d/grc.bashrc\n\nalias ls='ls --color=auto'\nalias grep='grep --color=auto'\nalias diff='diff --color=auto'\n\nexport LESS_TERMCAP_mb=$'\\e[1;32m'\nexport LESS_TERMCAP_md=$'\\e[1;32m'\nexport LESS_TERMCAP_me=$'\\e[0m'\nexport LESS_TERMCAP_se=$'\\e[0m'\nexport LESS_TERMCAP_so=$'\\e[01;33m'\nexport LESS_TERMCAP_ue=$'\\e[0m'\nexport LESS_TERMCAP_us=$'\\e[1;4;31m'\n\n# Force xterm-color on ssh sessions\nalias ssh='TERM=xterm-256color ssh'\n\nif [ \"$TERM\" = \"xterm\" ]; then\n export TERM=xterm-256color\nfi\n\n# set PROMPT_COMMAND\nPROMPT_COMMAND=${PROMPT_COMMAND:+$PROMPT_COMMAND; }'printf \"\\033]0;%s@%s:%s\\007\" \"${USER}\" \"${HOSTNAME%%.*}\" \"${PWD/#$HOME/\\~}\"'\n\neval \"$(starship init bash)\"\n\nalias obs=\"QT_QPA_PLATFORM=xcb obs\"\n\n# Tmux with lf\n# alias mc='tmux new-session \\; send-keys lf C-m \\; split -h \\; 
send-keys lf C-m'\n\n\n# Install packages using yay (change to pacman/AUR helper of your choice)\nfunction yayinstall() {\n yay -Slq | fzf -q \"$1\" -m --preview 'yay -Si {1}'| xargs -ro yay -S\n}\n\n################################################\n# FZF config\n[ -f ~/.fzf.bash ] && source ~/.fzf.bash\n\nexport FZF_DEFAULT_OPTS='--color=bg+:#3B4252,bg:#2E3440,spinner:#81A1C1,hl:#616E88,fg:#D8DEE9,header:#616E88,info:#81A1C1,pointer:#d08770,marker:#d08770,fg+:#D8DEE9,prompt:#81A1C1,hl+:#81A1C1'\nexport FZF_ALT_C_OPTS='--height=40% --min-height=20'\nexport FZF_CTRL_T_OPTS='--height=40% --min-height=20'\n\n# Default commands\nexport FZF_DEFAULT_COMMAND=\"fd\"\nexport FZF_CTRL_T_COMMAND=\"$FZF_DEFAULT_COMMAND\"\nexport FZF_ALT_C_COMMAND=\"fd -t d\"\n\nsource ${HOME}/.local/share/fzf/fzf-git.bash\n\n\n# Pyhton colors\nnorm=\"$(printf '\\033[0m')\" #returns to \"normal\"\nbold=\"$(printf '\\033[0;1m')\" #set bold\nred=\"$(printf '\\033[0;31m')\" #set red\nboldyellowonblue=\"$(printf '\\033[0;1;33;44m')\"\nboldyellow=\"$(printf '\\033[0;1;33m')\"\nboldred=\"$(printf '\\033[0;1;31m')\" #set bold, and set red.\n\ncopython() {\n python $@ 2>&1 | sed -e \"s/Traceback/${boldyellowonblue}&${norm}/g\" \\\n -e \"s/File \\\".*\\.py\\\".*$/${boldyellow}&${norm}/g\" \\\n -e \"s/\\, line [[:digit:]]\\+/${boldred}&${norm}/g\"\n }\n\n\n# BEGIN_KITTY_SHELL_INTEGRATION\n# if test -n \"$KITTY_INSTALLATION_DIR\" -a -e \"$KITTY_INSTALLATION_DIR/shell-integration/bash/kitty.bash\"; then source \"$KITTY_INSTALLATION_DIR/shell-integration/bash/kitty.bash\"; fi\n# END_KITTY_SHELL_INTEGRATION\n\nsource ${HOME}/bin/fzf-beet.bash\n\nexport JULIA_NUM_THREADS=4\n" }, { "alpha_fraction": 0.5046461820602417, "alphanum_fraction": 0.5207290649414062, "avg_line_length": 23.120689392089844, "blob_id": "ca7d3fde70d6f218545b96a34676484ac938c4a5", "content_id": "0fa232583cd94fc5a6f13cee59a09be6f471e805", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2799, "license_type": "no_license", "max_line_length": 91, "num_lines": 116, "path": "/home/.config/nwg-wrapper/plan.py", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom datetime import date\nfrom textwrap import fill, wrap, shorten\nfrom os.path import expandvars\nimport ruamel.yaml as yaml\nfrom pandas import DateOffset\n\nMONTHS = {\n 1:\"Januar\",\n 2:\"Februar\",\n 3:\"März\",\n 4:\"April\",\n 5:\"Mai\",\n 6:\"Juni\",\n 7:\"Juli\",\n 8:\"August\",\n 9:\"September\",\n 10:\"Oktober\",\n 11:\"November\",\n 12:\"Dezember\"\n}\n\n\ndef main():\n # Read plan\n with open(expandvars(\"$HOME/Nextcloud/five_year_plan.yaml\"), \"r\") as stream:\n plan = yaml.safe_load(stream)\n\n out = format_output(plan)\n print(out)\n\n\ndef format_output(data):\n \"\"\"Format to correct pango markup.\n\n Parameters\n ----------\n data : TODO\n\n Returns\n -------\n TODO\n\n \"\"\"\n width = 34\n curr_year = date.today().year\n curr_month = date.today().month\n start_date = date.today() - DateOffset(months=2)\n start_year = start_date.year\n dates = [start_date + DateOffset(months=k) for k in range(36)]\n iter_dates = [(d.year, d.month) for d in dates]\n\n prev_year = dates[0].year\n list_plan = '<tt><b> ' + f\"{start_date.year}\".ljust(width) + \"</b></tt>\\n\"\n\n num_elements = 0\n max_elements = 30\n\n for year_i, month_i in iter_dates:\n if year_i > prev_year:\n list_plan += \"\\n\" + \"<tt><b> \" + f\"{year_i}\".ljust(width) + \"</b></tt>\\n\"\n prev_year += 1\n\n elements_i = 
data[year_i][month_i-1][MONTHS[month_i]]\n\n if month_i == curr_month and year_i == curr_year:\n color_month = \"#bf616a\"\n else:\n color_month = \"#eceff4\"\n\n if year_i == start_year:\n color_year = \"#81a1c1\"\n elif year_i == start_year + 1:\n color_year = \"#ebcb8b\"\n else:\n color_year = \"#a3be8c\"\n\n month_str = shorten(f\"{MONTHS[month_i]}\", 3, placeholder=\"\") + \" \"\n month_str = f'<span face=\"monospace\" foreground=\"{color_month}\">{month_str}</span>'\n\n list_year = []\n\n if elements_i:\n for ele_i in elements_i:\n entry_month = shorten(str(ele_i), width)\n\n list_year.append(entry_month)\n num_elements += 1\n else:\n entry_month = \"\"\n list_year.append(entry_month)\n num_elements += 1\n\n start = '<tt> </tt>'\n for l, e in enumerate(list_year):\n month_entry = f'<span foreground=\"{color_year}\">{e}</span>'\n if l > 0:\n list_plan += f\"{start}{month_entry}\\n\"\n else:\n list_plan += f\"{month_str}{month_entry}\\n\"\n\n if num_elements >= max_elements:\n break\n\n\n base = f\"\"\"<span face=\"sans\">\n<span foreground=\"#eceff4\">\n{list_plan[:-1]}\n</span>\n</span>\"\"\"\n\n return base\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.48847925662994385, "alphanum_fraction": 0.5115207433700562, "avg_line_length": 15.538461685180664, "blob_id": "72567ddb9b606e50930a7998adc83bac46c3bc86", "content_id": "70672ea5b8337732abe607912daabffe85970684", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 217, "license_type": "no_license", "max_line_length": 77, "num_lines": 13, "path": "/home/.local/share/fzf/fzf-fex.bash", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\n# GIT heart Fex\n# -------------\n\nfzf-down() {\n fzf --height 60% --min-height 20 --border --bind ctrl-/:toggle-preview \"$@\"\n}\n\n_fl() {\n fexdox -i tapia -l |\n fzf-down -m --ansi --nth 2..,..\n}\n\n\n" }, { "alpha_fraction": 0.47816091775894165, "alphanum_fraction": 0.48521071672439575, "avg_line_length": 30.521739959716797, "blob_id": "c56672da4a5d7c9172c2af8a4b2dbeeca950499e", "content_id": "7da509919ad8b4b8b9269aae06a8e29425418259", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 6525, "license_type": "no_license", "max_line_length": 100, "num_lines": 207, "path": "/home/.config/nvim/lua/plugins/cmp.lua", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "return {\n {\n 'hrsh7th/nvim-cmp',\n name = \"nvim-cmp\",\n event = { \"BufReadPost\", \"BufNewFile\" },\n dependencies = {\n { \"danymat/neogen\" },\n { 'hrsh7th/cmp-buffer', },\n { 'hrsh7th/cmp-omni', },\n { 'hrsh7th/cmp-path', },\n { 'hrsh7th/cmp-nvim-lsp', },\n { 'hrsh7th/cmp-nvim-lsp-signature-help', },\n { 'hrsh7th/cmp-cmdline', },\n { \"davidsierradz/cmp-conventionalcommits\", },\n { 'quangnguyen30192/cmp-nvim-ultisnips', },\n { 'rose-pine/neovim' },\n { 'windwp/nvim-autopairs' },\n },\n -- Configuration\n config = function()\n local cmp = require('cmp')\n local cmp_select_opts = { behavior = cmp.SelectBehavior.Select }\n local cmp_ultisnips_mappings = require('cmp_nvim_ultisnips.mappings')\n\n -- Define mappings for cmp\n local neogen = require('neogen')\n local cmp_mappings = {\n ['<C-p>'] = cmp.mapping.select_prev_item(cmp_select_opts),\n ['<C-n>'] = cmp.mapping.select_next_item(cmp_select_opts),\n ['<C-Space>'] = cmp.mapping(function()\n if cmp.visible() then\n print(\"here 1\")\n cmp.confirm({ select = true, behavior = 
'replace' })\n else\n vim.fn[\"UltiSnips#ExpandSnippet\"]()\n end\n end,\n { \"i\", \"s\" }\n ),\n -- ['<C-Space>'] = cmp.mapping.complete(cmp_select),\n [\"<C-j>\"] = cmp.mapping(\n function(fallback)\n if neogen.jumpable() then\n neogen.jump_next()\n else\n cmp_ultisnips_mappings.jump_forwards(fallback)\n end\n end,\n { \"i\", \"s\" }\n ),\n [\"<C-z>\"] = cmp.mapping(\n function(fallback)\n if neogen.jumpable(true) then\n neogen.jump_prev()\n else\n cmp_ultisnips_mappings.jump_backwards(fallback)\n end\n end,\n { \"i\", \"s\" }\n ),\n ['<C-b>'] = cmp.mapping.scroll_docs(-4),\n ['<C-f>'] = cmp.mapping.scroll_docs(4),\n ['<C-e>'] = cmp.mapping.abort(),\n }\n\n -- Add parenthesis automatically after functions\n local cmp_autopairs = require('nvim-autopairs.completion.cmp')\n cmp.event:on(\n 'confirm_done',\n cmp_autopairs.on_confirm_done()\n )\n\n -- Define colors for the suggestion menu of nvim-cmp\n local p = require('rose-pine.palette')\n\n vim.api.nvim_set_hl(0, 'CmpItemKindVariable', { fg = p.foam })\n vim.api.nvim_set_hl(0, 'CmpItemKindClass', { fg = p.gold })\n vim.api.nvim_set_hl(0, 'CmpItemKindInterface', { fg = p.gold })\n vim.api.nvim_set_hl(0, 'CmpItemKindFunction', { fg = p.iris })\n vim.api.nvim_set_hl(0, 'CmpItemKindMethod', { fg = p.iris })\n vim.api.nvim_set_hl(0, 'CmpItemKindSnippet', { fg = p.iris })\n vim.api.nvim_set_hl(0, 'CmpItemKindKeyword', { fg = p.subtle })\n vim.api.nvim_set_hl(0, 'CmpItemKindText', { fg = p.subtle })\n\n local cmp_config = {\n -- Define sources to be used\n sources = {\n {\n name = \"nvim_lsp\",\n priority = 9,\n keyword_length = 1,\n -- Disable source for comments\n entry_filter = function(entry, ctx)\n local context = require 'cmp.config.context'\n return not context.in_treesitter_capture(\"comment\")\n and not context.in_syntax_group(\"Comment\")\n end\n },\n { name = 'omni' },\n {\n name = \"ultisnips\",\n priority = 10,\n -- Disable source for comments\n entry_filter = function(entry, ctx)\n local context = require 'cmp.config.context'\n return not context.in_treesitter_capture(\"comment\")\n and not context.in_syntax_group(\"Comment\")\n end\n },\n {\n name = \"path\",\n option = { trailin_slash = true },\n priority = 4\n },\n { name = \"latex_symbols\", priority = 2 },\n {\n name = 'buffer',\n keyword_length = 3,\n option = {\n get_bufnrs = function()\n local buf = vim.api.nvim_get_current_buf()\n local byte_size = vim.api.nvim_buf_get_offset(buf, vim.api.nvim_buf_line_count(buf))\n if byte_size > 1024 * 1024 then -- 1 Megabyte max\n return {}\n end\n return { buf }\n end\n }\n },\n },\n mapping = cmp_mappings,\n snippet = {\n expand = function(args)\n vim.fn[\"UltiSnips#Anon\"](args.body)\n end,\n },\n experimental = {\n ghost_text = { hl_group = 'DevIconCMake' },\n },\n performance = {\n max_view_entries = 40,\n fetching_timeout = 100,\n },\n formatting = {\n fields = { 'abbr', 'menu', 'kind' },\n format = function(entry, item)\n local short_name = {\n nvim_lsp = 'LSP',\n nvim_lua = 'nvim'\n }\n\n local menu_name = short_name[entry.source.name] or entry.source.name\n\n item.menu = string.format('[%s]', menu_name)\n return item\n end,\n },\n sorting = {\n comparators = {\n cmp.config.compare.sort_text,\n cmp.config.compare.offset,\n cmp.config.compare.exact,\n cmp.config.compare.score,\n cmp.config.compare.kind,\n cmp.config.compare.length,\n cmp.config.compare.order,\n }\n },\n window = {\n completion = {\n winhighlight = \"Normal:StatusLineNC,CursorLine:Substitute\",\n },\n documentation = {\n winhighlight = 'FloatBorder:WildMenu',\n 
border = \"rounded\",\n }\n },\n\n -- `/` cmdline setup.\n cmp.setup.cmdline('/', {\n mapping = cmp.mapping.preset.cmdline(),\n sources = {\n { name = 'buffer' }\n }\n }),\n\n -- `:` cmdline setup.\n cmp.setup.cmdline(':', {\n mapping = cmp.mapping.preset.cmdline(),\n sources = cmp.config.sources({\n { name = 'path' }\n }, {\n { name = 'cmdline', keyword_length = 2 }\n })\n }),\n }\n\n cmp.setup(cmp_config)\n end\n },\n {\n \"kdheepak/cmp-latex-symbols\",\n ft = \"julia\",\n dependencies = { \"nvim-cmp\" }\n },\n}\n-- vim: set shiftwidth=2:\n" }, { "alpha_fraction": 0.6866196990013123, "alphanum_fraction": 0.6901408433914185, "avg_line_length": 19.285715103149414, "blob_id": "5d9057e8c3450251dd530207e3f889d75129c2b8", "content_id": "a7d3005c53805fdc05352bdd9d177728d5da2f8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 284, "license_type": "no_license", "max_line_length": 49, "num_lines": 14, "path": "/home/.bash_profile", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "#\n# ~/.bash_profile\n#\n\n[[ -f ~/.bashrc ]] && . ~/.bashrc\n\n# Autocompletion for pubs\neval \"$(register-python-argcomplete pubs)\"\n\n#export PATH=\"${PATH}:/opt/anaconda/bin\"\nexport MPA=\"$HOME/Documents/MPA\"\nexport NVIM_GTK_NO_HEADERBAR=1\n\nsource /home/tapia/.config/broot/launcher/bash/br\n" }, { "alpha_fraction": 0.5376344323158264, "alphanum_fraction": 0.5423656105995178, "avg_line_length": 30.849315643310547, "blob_id": "cb00d53da1b920e3ccf485f6994a9ae1c7f8f1af", "content_id": "79bc4f1931b48114c9b409737606ada7b9ccebe0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 2325, "license_type": "no_license", "max_line_length": 99, "num_lines": 73, "path": "/home/.config/nvim/lua/plugins/python.lua", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "return {\n {\n \"tpope/vim-dispatch\",\n ft = { \"typst\", \"python\", \"fortran\", \"make\" },\n keys = { \"<F5>\", \"<F6>\" },\n config = function()\n -- Execute programs asyncronously\n local function save_and_run_async()\n -- Save file\n vim.cmd.write()\n -- Get filetype\n local ft = vim.bo.filetype\n -- Execute command according to filetype\n local dir = vim.fn.fnameescape(vim.fn.expand(\"%:p:h\"))\n local filename = vim.fn.fnameescape(vim.fn.expand(\"%:p\"))\n\n if ft == \"typst\" then\n vim.cmd('Dispatch -compiler=typst -dir=' .. dir .. ' typst compile ' .. filename)\n elseif ft == \"fortran\" then\n vim.cmd('Dispatch -compiler=abaqus -dir=' .. dir .. ' abq2022 make library=' .. filename)\n elseif ft == \"python\" then\n vim.cmd('Dispatch -compiler=python -dir=' .. dir .. ' python ' .. filename)\n end\n end\n\n local function debug_async()\n -- Save file\n vim.cmd.write()\n -- Get filetype\n local ft = vim.bo.filetype\n -- Execute command according to filetype\n local dir = vim.fn.fnameescape(vim.fn.expand(\"%:p:h\"))\n local filename = vim.fn.fnameescape(vim.fn.expand(\"%:p\"))\n\n if ft == \"python\" then\n vim.cmd('Start -dir=' .. dir .. ' python -m pdb ' .. 
filename)\n end\n end\n\n vim.keymap.set(\"n\", \"<F5>\", save_and_run_async)\n vim.keymap.set(\"n\", \"<F6>\", debug_async)\n -- Deactivate default mappings\n vim.g.dispatch_no_maps = 1\n vim.g.dispatch_no_tmux_start = 1\n end,\n },\n {\n 'untitled-ai/jupyter_ascending.vim',\n event = \"BufEnter *.sync.py\",\n dependencies = {\n { 'kana/vim-textobj-user' },\n { 'GCBallesteros/vim-textobj-hydrogen' },\n },\n keys = {\n { \"<leader><leader>x\", \"<Plug>JupyterExecute\" },\n { \"<leader><leader>X\", \"<Plug>JupyterExecuteAll\" },\n { \"<leader><leader>r\", \"<Plug>JupyterRestart\" },\n },\n config = function()\n -- Delete default key bindings\n vim.keymap.del(\"n\", \"<space><space>x\")\n vim.keymap.del(\"n\", \"<space><space>X\")\n vim.keymap.del(\"n\", \"<space><space>r\")\n end\n },\n -- {\n -- 'stevearc/overseer.nvim',\n -- opts = {\n -- templates = { \"builtin\", \"user.python_build\" }\n -- },\n -- }\n}\n-- vim: set shiftwidth=2:\n" }, { "alpha_fraction": 0.6427207589149475, "alphanum_fraction": 0.6622911691665649, "avg_line_length": 28.716312408447266, "blob_id": "4d2f725416b0f459376c2b193c1ce7b4de631ea1", "content_id": "7274b8374edd74030d1547fa0339fdc5fb28f74f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4194, "license_type": "no_license", "max_line_length": 192, "num_lines": 141, "path": "/home/.zshrc", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "# The following lines were added by compinstall\n\nzstyle ':completion:*' completer _expand _complete _ignored _approximate\nzstyle ':completion:*' completions 1\nzstyle ':completion:*' glob 1\nzstyle ':completion:*' list-colors “${(s.:.)LS_COLORS}”\nzstyle ':completion:*' group-name ''\nzstyle ':completion:*' max-errors 2\nzstyle ':completion:*' substitute 1\nzstyle ':completion:*' file-sort modification\nzstyle ':completion:*' verbose true\nzstyle ':completion:*' menu select\nzstyle :compinstall filename '/home/tapia/.zshrc'\n\nzmodload zsh/complist\nbindkey -M menuselect 'h' vi-backward-char\nbindkey -M menuselect 'k' vi-up-line-or-history\nbindkey -M menuselect 'l' vi-forward-char\nbindkey -M menuselect 'j' vi-down-line-or-history\n\n# Surroundings\nautoload -Uz surround\nzle -N delete-surround surround\nzle -N add-surround surround\nzle -N change-surround surround\nbindkey -M vicmd cs change-surround\nbindkey -M vicmd ds delete-surround\nbindkey -M vicmd ys add-surround\nbindkey -M visual S add-surround\n\n# Completions:\n# - Homeshick\nsource \"$HOME/.homesick/repos/homeshick/homeshick.sh\"\nfpath=($HOME/.homesick/repos/homeshick/completions $fpath)\n# - Pyenv\nfpath=($HOME/.config/zsh/comp $fpath)\n\npath=($HOME/bin $path)\n\nautoload -Uz compinit\n# compinit\nif [ $(date +'%j') != $(date +\"%j\" -r $HOME/.zcompdump) ]; then\n compinit\nelse\n compinit -C\nfi\n\n# FEX\ncompdef _gnu_generic fexsend fexget fexpush \\\n fexpull fexstore fexpack fexzip fexsync \\\n autofex xx xxx sexsend sexget sexxx\n\nautoload -Uz promptinit\npromptinit\n\nautoload -Uz up-line-or-beginning-search down-line-or-beginning-search\nzle -N up-line-or-beginning-search\nzle -N down-line-or-beginning-search\n# End of lines added by compinstall\n# Lines configured by zsh-newuser-install\nHISTFILE=~/.histfile\nHISTSIZE=5000\nSAVEHIST=10000\nbindkey -v\n\nbindkey \"^?\" backward-delete-char\nbindkey \"^w\" backward-delete-word\n\n# Change cursor\n# source \"$HOME/.config/zsh/plugins/cursor_mode\"\n\n# Starship\neval \"$(starship init zsh)\"\n\n# Set language for the 
console\nexport LANG=\"en_US.UTF-8\"\nexport EDITORCMD=\"nvim-qt\"\nexport EDITOR=\"nvim\"\n\n#----------------------------------------------------------------------\n# Virtual Environment\nexport WORKON_HOME=~/.virtualenvs\n# source /usr/bin/virtualenvwrapper.sh\nexport VIRTUALENVWRAPPER_SCRIPT=/usr/bin/virtualenvwrapper.sh\n# Lazy-load virtualenvwrapper\nsource /usr/bin/virtualenvwrapper_lazy.sh\n\n#----------------------------------------------------------------------\n# Pyenv\nexport PYENV_ROOT=\"$HOME/.pyenv\"\ncommand -v pyenv >/dev/null || path=($PYENV_ROOT/bin $path)\neval \"$(pyenv init -)\"\n\n#----------------------------------------------------------------------\n# F*EX\npath=($HOME/.fex/bin $path)\n\n# Force xterm-color on ssh sessions\nalias ssh='TERM=xterm-256color ssh'\n\nif [ \"$TERM\" = \"xterm\" ]; then\n export TERM=xterm-256color\nfi\n\n#----------------------------------------------------------------------\n# Install packages using yay (change to pacman/AUR helper of your choice)\nfunction yayinstall() {\n yay -Slq | fzf -q \"$1\" -m --preview 'yay -Si {1}'| xargs -ro yay -S\n}\n\n#----------------------------------------------------------------------\n# FZF\nsource /usr/share/fzf/key-bindings.zsh\nsource /usr/share/fzf/completion.zsh\nsource ~/.local/share/fzf/fzf-git.bash\n\n# - Options\nexport FZF_DEFAULT_OPTS='--color=bg+:#3B4252,bg:#2E3440,spinner:#81A1C1,hl:#616E88,fg:#D8DEE9,header:#616E88,info:#81A1C1,pointer:#d08770,marker:#d08770,fg+:#D8DEE9,prompt:#81A1C1,hl+:#81A1C1'\nexport FZF_ALT_C_OPTS='--height=40% --min-height=20'\nexport FZF_CTRL_T_OPTS='--height=40% --min-height=20'\n# Default commands\nexport FZF_DEFAULT_COMMAND=\"fd\"\n# export FZF_DEFAULT_COMMAND=\"bfs\"\nexport FZF_CTRL_T_COMMAND=\"$FZF_DEFAULT_COMMAND\"\nexport FZF_ALT_C_COMMAND=\"fd -t d\"\n# export FZF_ALT_C_COMMAND=\"bfs -type d -nohidden \"\n\n#----------------------------------------------------------------------\n# Colors\nalias ls='ls --color=auto --group-directories-first'\nalias grep='grep --color=auto'\nalias diff='diff --color=auto'\n\nexport JULIA_NUM_THREADS=4\n\nexport MPA=\"$HOME/Documents/MPA\"\nexport NVIM_GTK_NO_HEADERBAR=1\n\nexport PYTHON_KEYRING_BACKEND=keyring.backends.null.Keyring\n\n[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh\n" }, { "alpha_fraction": 0.5863853693008423, "alphanum_fraction": 0.6015127301216125, "avg_line_length": 26.88888931274414, "blob_id": "b920678edc1cbf64fb9d57c9816bc1c50d2e59ae", "content_id": "25d2bf3d263cc73f5a77957fe6110cacfe519c89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2518, "license_type": "no_license", "max_line_length": 79, "num_lines": 90, "path": "/home/.password-store/.extensions/get.bash", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n# pass get - Password Store Extension (https://www.passwordstore.org/)\n# Copyright (C) 2020 James Patrick\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\nR='\\033[0;31m' \nG='\\033[0;32m' \nNC='\\033[0m'\n\nPASSWORD_STORE_DIR=\"${PASSWORD_STORE_DIR:-$HOME/.password-store}\"\n\ncmd_get_usage() {\n cat <<-_EOF\nUsage:\n $PROGRAM get pattern pass_name\n Parses the pass data for lines pulling out the value of browserpass's\n format of \"key:value\". The 'pattern' arg is a case insenitive, extended\n regex using grep -iE.\n\n https://github.com/browserpass/browserpass-extension\n\n Options:\n Nada.\n_EOF\n exit 0\n}\n\n\ncmd_get_assert(){\n test=\"$1\"\n local pattern=\"$2\" expected=\"$3\"\n\n actual=\"$(cmd_get \"$pattern\")\"\n if [[ \"$actual\" == \"$expected\" ]] ; then\n echo -e \"$G ✔ '$actual' == '$expected' $NC\"\n else\n echo -e \"$R ✘ f('$test', '$pattern') → '$actual' != '$expected' $NC\"\n fi\n}\n\ncmd_get_test(){\n cmd_get_assert \"username:value\" \"(user|username|login)\" \"value\"\n cmd_get_assert \" user: whitespace\" \"(user|username|login)\" \"whitespace\"\n\n val=\"multiple:lines\nvalue:pairs\"\n cmd_get_assert \"$val\" \"value\" \"pairs\"\n cmd_get_assert \"Key:capitalization doesn't counts\" \"key\" \\\n \"capitalization doesn't counts\"\n cmd_get_assert \"missing:empty\" \"i don't exist\" \"\"\n\n\n exit 0\n}\n\ncmd_get(){\n local pattern=\"$1\" pass_name=\"$2\"\n\n # valid the regex is valid\n echo | grep -qiE \"$pattern\"\n if [[ \"$?\" == 2 ]] ; then\n >&2 echo -e \"${R}Invalid Pattern:${NC} $pattern\"\n return 1\n fi \n \n echo \"${test:-$(cmd_show \"$pass_name\")}\" \\\n | grep -iE \"$pattern\" \\\n | head -1 \\\n | cut -d ':' -f2- \\\n | sed -e 's/^[ \\t]*//'\n}\n\n\n[[ \"$1\" == \"test\" ]] && cmd_get_test\n[[ \"$1\" == \"help\" || \"$1\" == \"--help\" || \"$1\" == \"-h\" ]] && cmd_get_usage\n[[ \"$#\" != \"2\" ]] && cmd_get_usage\ncmd_get \"$@\"\n\n\n" }, { "alpha_fraction": 0.45193371176719666, "alphanum_fraction": 0.46519336104393005, "avg_line_length": 23.45945930480957, "blob_id": "a9787f2465191b0112bb41b9f948abfe57b3e7cf", "content_id": "20fbedce07a07334915e1c3927382e7d1c802015", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 905, "license_type": "no_license", "max_line_length": 81, "num_lines": 37, "path": "/home/.config/nvim/lua/plugins/git.lua", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "return {\n -- Fugitive\n {\n 'tpope/vim-fugitive',\n cmd = \"G\",\n keys = {\n { \"<leader>gs\", vim.cmd.Git },\n { \"<leader>gc\", \"<cmd>Git commit<cr>\" },\n },\n },\n -- Gitsigns\n {\n 'lewis6991/gitsigns.nvim',\n config = function(_, opts)\n require(\"gitsigns\").setup {\n on_attach = function(bufnr)\n local gitsigns = require('gitsigns')\n vim.keymap.set(\"n\", \"<leader>gn\",\n function()\n gitsigns.next_hunk({ foldopen = true })\n end\n )\n vim.keymap.set(\"n\", \"<leader>gN\",\n function()\n gitsigns.prev_hunk({ foldopen = true })\n end\n )\n vim.keymap.set(\"n\", \"<leader>gp\",\n gitsigns.preview_hunk\n )\n end\n }\n vim.api.nvim_set_hl(0, 'GitSignsAddLn', { fg = \"#434c5e\", bg = \"#a3be8c\" })\n end\n },\n}\n-- vim: set shiftwidth=2:\n" }, { "alpha_fraction": 0.7136150002479553, "alphanum_fraction": 0.7230046987533569, "avg_line_length": 25.625, "blob_id": "63f712d2901fc5751b0fc564f13014da15ecf221", "content_id": "47dc5cbc03d9a1751eff9a0c8b107852eaba8dbe", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Lua", "length_bytes": 213, "license_type": "no_license", "max_line_length": 32, "num_lines": 8, "path": "/home/.config/nvim/after/ftplugin/tex.lua", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "-- Set spelling\nvim.cmd('syntax spell toplevel')\nvim.opt_local.spell = true\n-- Define tex flavor to latex\nvim.g.tex_flavor = 'latex'\n-- Set tab to two-spaces\nvim.opt_local.shiftwidth = 2\nvim.opt_local.tabstop = 2\n" }, { "alpha_fraction": 0.49486976861953735, "alphanum_fraction": 0.5059195160865784, "avg_line_length": 28.465116500854492, "blob_id": "417e236a0237f91ab6b7ff65959b21f412bb2024", "content_id": "7ef3d01320dd269097d420c1a17de133c31e2e0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 2534, "license_type": "no_license", "max_line_length": 94, "num_lines": 86, "path": "/home/.config/nvim/lua/plugins/treesitter.lua", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "return {\n {\n 'nvim-treesitter/nvim-treesitter',\n event = { \"BufReadPost\", \"BufNewFile\" },\n build = \":TSUpdate\",\n dependencies = { \"nvim-treesitter/nvim-treesitter-textobjects\" },\n opts = {\n -- A list of parser names, or \"all\" (the five listed parsers should always be installed)\n ensure_installed = {\n \"bash\",\n \"bibtex\",\n \"comment\",\n \"css\",\n \"dockerfile\",\n \"javascript\",\n \"json\",\n \"julia\",\n \"lua\",\n \"python\",\n \"query\",\n \"toml\",\n \"vim\",\n \"vimdoc\",\n \"yaml\",\n \"yuck\",\n },\n\n -- Install parsers synchronously (only applied to `ensure_installed`)\n sync_install = false,\n auto_install = false,\n highlight = {\n enable = true,\n additional_vim_regex_highlighting = false,\n },\n rainbow = {\n enable = true,\n colors = {\n \"#eceff4\",\n \"#88c0d0\",\n \"#ebcb8b\",\n \"#81a1c1\",\n \"#d08770\",\n },\n -- Highlight also non-parentheses delimiters, boolean or table: lang -> boolean\n extended_mode = false,\n -- Do not enable for files with more than 1000 lines, int\n max_file_lines = 1000,\n },\n textobjects = {\n select = {\n enable = true,\n -- Automatically jump forward to textobj, similar to targets.vim\n lookahead = true,\n keymaps = {\n -- You can use the capture groups defined in textobjects.scm\n [\"af\"] = \"@function.outer\",\n [\"if\"] = \"@function.inner\",\n [\"ac\"] = \"@class.outer\",\n [\"ic\"] = { query = \"@class.inner\", desc = \"Select inner part of a class region\" },\n },\n selection_modes = {\n ['@parameter.outer'] = 'v', -- charwise\n ['@function.outer'] = 'V', -- linewise\n ['@class.outer'] = 'V', -- blockwise\n },\n include_surrounding_whitespace = false,\n },\n },\n },\n config = function(_, opts)\n require(\"nvim-treesitter.configs\").setup(opts)\n -- Foldings\n vim.opt.foldmethod = \"expr\"\n vim.opt.foldexpr = \"nvim_treesitter#foldexpr()\"\n vim.opt.foldnestmax = 2\n vim.opt.foldenable = false\n\n vim.cmd('hi pythonTSParameter guifg=#b48ead')\n vim.cmd('hi TSConstant guifg=#ebcb8b')\n\n end\n },\n { \"nvim-treesitter/nvim-treesitter-textobjects\", },\n { 'nvim-treesitter/playground' },\n}\n-- vim: set shiftwidth=2:\n" }, { "alpha_fraction": 0.5164835453033447, "alphanum_fraction": 0.5252183675765991, "avg_line_length": 28.575000762939453, "blob_id": "edc4298f60815d535c736fce3333ba6ffeb71712", "content_id": "54f6575b5d7b900519c5aea32514074e45a1c033", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 3549, "license_type": "no_license", "max_line_length": 
88, "num_lines": 120, "path": "/home/.config/nvim/lua/plugins/latex.lua", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "return {\n -- Integration with latex\n {\n 'lervag/vimtex',\n ft = { \"tex\", \"cls\", \"sty\", \"tikz\" },\n dependencies = { \"hrsh7th/nvim-cmp\" },\n keys = {\n { \"<localleader>ll\", \"<Plug>(vimtex-compile-ss)\" },\n { \"<localleader>ll\", \"<Plug>(vimtex-compile-selected)\", mode = \"v\" },\n { \"<localleader>lv\", \"<Plug>(vimtex-view)\" },\n { \"<localleader>lo\", \"<Plug>(vimtex-compile-output)\" },\n { \"<localleader>rf\", \"<cmd>VimtexRefreshFolds<cr>\" },\n },\n config = function()\n vim.g.tex_flavor = 'latex'\n\n -- Set autocompletion\n require('cmp').setup.buffer {\n formatting = {\n format = function(entry, vim_item)\n vim_item.menu = ({\n omni = (vim.inspect(vim_item.menu):gsub('%\"', \"\")),\n buffer = \"[Buffer]\",\n -- formatting for other sources\n })[entry.source.name]\n return vim_item\n end,\n },\n sources = {\n { name = 'omni', priority = 9 },\n { name = 'buffer' },\n { name = \"path\", priority = 4 },\n {\n name = \"ultisnips\",\n priority = 10,\n -- Disable source for comments\n entry_filter = function(entry, ctx)\n local context = require 'cmp.config.context'\n return not context.in_treesitter_capture(\"comment\")\n and not context.in_syntax_group(\"Comment\")\n end\n },\n -- other sources\n },\n }\n\n -- Set options for vimtex\n vim.g.vimtex_compiler_latexmk = {\n out_dir = 'out',\n aux_dir = '.aux',\n callback = 1,\n continuous = 0,\n executable = 'latexmk',\n options = { '-verbose',\n '-shell-escape',\n '-file-line-error',\n '-synctex=1',\n '-interaction=nonstopmode',\n },\n }\n\n vim.g.vimtex_compiler_latexmk_engines = { _ = '-lualatex' }\n\n vim.g.vimtex_complete_enabled = 1\n vim.g.vimtex_complete_close_braces = 0\n vim.g.vimtex_complete_ignore_case = 1\n vim.g.vimtex_complete_smart_case = 1\n vim.g.vimtex_fold_enabled = 1\n\n if vim.fn.executable('bibtexparser') then\n vim.g.vimtex_parser_bib_backend = 'bibtexparser'\n end\n\n vim.g.vimtex_syntax_conceal = {\n accents = 1,\n ligatures = 0,\n cites = 1,\n fancy = 0,\n spacing = 0,\n greek = 0,\n math_bounds = 0,\n math_delimiters = 0,\n math_fracs = 0,\n math_super_sub = 0,\n math_symbols = 1,\n sections = 0,\n styles = 1,\n }\n\n -- vim.l.concealcursor = ''\n\n vim.g.vimtex_indent_enabled = true\n vim.g.vimtex_indent_on_ampersands = false\n vim.g.vimtex_indent_bib_enabled = false\n vim.g.vimtex_fold_bib_enabled = false\n\n vim.g.vimtex_imaps_leader = '#'\n vim.g.vimtex_quickfix_method = 'latexlog'\n vim.g.matchup_override_vimtex = true\n vim.g.vimtex_compiler_progname = vim.env.HOME .. 
'/.virtualenvs/py3neovim/bin/nvr'\n\n vim.g.vimtex_view_method = 'zathura'\n vim.g.vimtex_view_forward_search_on_start = true\n\n vim.g.vimtex_echo_verbose_input = 0\n\n vim.g.vimtex_quickfix_autoclose_after_keystrokes = 2\n vim.g.vimtex_quickfix_open_on_warning = 0\n\n -- Delimiter modifiers\n vim.g.vimtex_delim_toggle_mod_list = {\n { '\\\\left', '\\\\right' },\n { '\\\\mleft', '\\\\mright' },\n }\n end\n },\n -- Transfor latex symbols to unicode\n { 'joom/latex-unicoder.vim' },\n}\n-- vim: set shiftwidth=2:\n" }, { "alpha_fraction": 0.5978915691375732, "alphanum_fraction": 0.6069276928901672, "avg_line_length": 26.625, "blob_id": "b434a92ab11395af34a200267a8c35b7f8265731", "content_id": "c36d180fc0cba7b05454e39c708fe026f3b1dbae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 664, "license_type": "no_license", "max_line_length": 80, "num_lines": 24, "path": "/home/.config/nvim/lua/tapia/autocmd.lua", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "-- Define some useful autocommands\n\nvim.api.nvim_create_autocmd({ 'FileType' }, {\n pattern = { 'lua' },\n callback = function()\n vim.opt.expandtab = true\n vim.opt.softtabstop = 4\n vim.opt.shiftwidth = 4\n vim.opt.tabstop = 4\n end,\n})\n\nlocal asyncrun_group = vim.api.nvim_create_augroup('asyncrun', { clear = true })\n\nvim.api.nvim_create_autocmd({ 'User' }, {\n pattern = { 'AsyncRunStart' },\n group = asyncrun_group,\n command = 'call asyncrun#quickfix_toggle(10, 1)',\n})\nvim.api.nvim_create_autocmd({ 'User' }, {\n pattern = { 'AsyncRunStop' },\n group = asyncrun_group,\n command = \"copen | clast | wincmd k\",\n})\n\n" }, { "alpha_fraction": 0.33760684728622437, "alphanum_fraction": 0.3390313386917114, "avg_line_length": 25, "blob_id": "0c2df168a962a77ab7c1186c409bab2000306147", "content_id": "87e3b1851e67cac1a2757f645f1fa7932ffa2e1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 702, "license_type": "no_license", "max_line_length": 57, "num_lines": 27, "path": "/home/.config/nvim/lua/overseer/template/user/python_build.lua", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "return {\n name = \"python_build\",\n builder = function()\n -- Full path to current file (see :help expand())\n local file = vim.fn.expand(\"%:p\")\n return {\n cmd = { \"python\" },\n args = { file },\n components = {\n { \"on_exit_set_status\" },\n {\n \"display_duration\",\n detail_level = 2,\n },\n {\n \"on_output_quickfix\",\n open = true,\n items_only = false,\n tail = true,\n },\n },\n }\n end,\n condition = {\n filetype = { \"python\" },\n },\n}\n" }, { "alpha_fraction": 0.774193525314331, "alphanum_fraction": 0.774193525314331, "avg_line_length": 30, "blob_id": "2d7633a2450ccc33850d82636fdfc4823583d915", "content_id": "844318b10e71f619b6632be2c1b7027dbc0677b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 31, "license_type": "no_license", "max_line_length": 30, "num_lines": 1, "path": "/home/.config/nvim/after/ftplugin/pandoc.lua", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "vim.opt_local.formatexpr = nil\n" }, { "alpha_fraction": 0.5241379141807556, "alphanum_fraction": 0.5413793325424194, "avg_line_length": 23.16666603088379, "blob_id": "8439b25e5e91c9225c5b6c46dee912d874da73c3", "content_id": "97a47de4944d7c718a9dcc9767831b3acf6b2cb7", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Lua", "length_bytes": 290, "license_type": "no_license", "max_line_length": 89, "num_lines": 12, "path": "/home/.config/nvim/lua/plugins/typst.lua", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "return {\n {\n 'kaarmu/typst.vim',\n ft = { \"typst\" },\n branch = 'main',\n dependencies = { { 'skywind3000/asyncrun.vim' } },\n config = function()\n vim.keymap.set(\"n\", \"<leader>lv\", \"<cmd>AsyncRun -mode=hide zathura '%<'.pdf <cr>\")\n end\n },\n}\n-- vim: set shiftwidth=2:\n" }, { "alpha_fraction": 0.5249999761581421, "alphanum_fraction": 0.5249999761581421, "avg_line_length": 25.66666603088379, "blob_id": "467ff0002559e0b6595720df20cb96e999349d1a", "content_id": "a412b42c24fb5e212783e89467a5aabc3dddc584", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 160, "license_type": "no_license", "max_line_length": 45, "num_lines": 6, "path": "/home/.config/nvim/after/ftplugin/gitcommit.lua", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "require 'cmp'.setup.buffer {\n sources = require 'cmp'.config.sources(\n { { name = 'conventionalcommits' } },\n { { name = 'buffer' } }\n ),\n}\n" }, { "alpha_fraction": 0.7816091775894165, "alphanum_fraction": 0.7816091775894165, "avg_line_length": 23.85714340209961, "blob_id": "ac890e13d25f46c20061366c4474a86ece556ddf", "content_id": "002568a7a7729a945cf639bd56cfa6f82165d9d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 174, "license_type": "no_license", "max_line_length": 54, "num_lines": 7, "path": "/home/.pylintrc", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "[MESSAGES CONTROL]\n\ndisable=too-many-locals,missing-docstring,invalid-name\n\n[VARIABLES]\n# Good variable names,which should always be accepted\ngood-names=i,j,k,ix,ij,ik,x,y,z\n" }, { "alpha_fraction": 0.6275992393493652, "alphanum_fraction": 0.6389414072036743, "avg_line_length": 22.46666717529297, "blob_id": "bf7750d5ca94cd60214a7ea05d27f3336fe7afcc", "content_id": "c55502739b69ea67f7f3e660d37e13d8fc75c096", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1058, "license_type": "no_license", "max_line_length": 73, "num_lines": 45, "path": "/home/bin/yalafi-grammarous", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n#\n# interface between vim-grammarous or vim-LanguageTool and yalafi.shell\n# - collect all options passed by editor, except --api\n# - languagetool-commandline.jar is a bit picky with repeated options\n#\n\n# LT's base directory\n#\nltdir=~/.vim/plugged/vim-grammarous/misc/LanguageTool-4.8\n\n# comment out to use languagetool-commandline\n#\nuse_server=\"--server my\"\n\n# vim-grammarous needs byte offsets in XML report\n# --> set to xml-b\n#\noutput=xml-b\noutput=xml\n\nopts=x\nlang=en-GB\nwhile [ $# -gt 1 ]\ndo\n if [ X$1 == X-l ]\n then\n # languagetool-commandline does not like multiple language specs\n shift\n lang=$1\n elif [ X$1 == X-c ]\n then\n # languagetool-commandline does not like multiple encoding specs\n # (yalafi.shell already includes --encoding utf-8)\n shift\n elif [ X$1 != X--api ]\n then\n opts+=\"$1 \"\n fi\n shift\ndone\n\npython3 -m yalafi.shell $use_server --output $output --language $lang \\\n --lt-directory $ltdir --lt-options \"$opts\" $1 2>/dev/null\n\n\n" }, { "alpha_fraction": 0.4814264476299286, 
"alphanum_fraction": 0.5061911940574646, "avg_line_length": 35.05356979370117, "blob_id": "b63b820a720a8521008908c2501befa3522435ee", "content_id": "5be70bd494683b5e033330e651189918d80eac66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 2025, "license_type": "no_license", "max_line_length": 144, "num_lines": 56, "path": "/home/.config/nvim/lua/tapia/init.lua", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "require(\"tapia.remap\")\nrequire(\"tapia.set\")\nrequire(\"tapia.autocmd\")\n\nvim.opt.guicursor =\n\"n-v-c:block,i-ci-ve:ver25,r-cr:hor20,o:hor50,a:blinkwait700-blinkoff400-blinkon250-Cursor/lCursor,sm:block-blinkwait175-blinkoff150-blinkon175\"\n\n-- Define format for quickfix window\nfunction _G.qftf(info)\n local items\n local ret = {}\n if info.quickfix == 1 then\n items = vim.fn.getqflist({ id = info.id, items = 0 }).items\n else\n items = vim.fn.getloclist(info.winid, { id = info.id, items = 0 }).items\n end\n local limit = 31\n local fname_fmt1, fname_fmt2 = '%-' .. limit .. 's', '…%.' .. (limit - 1) .. 's'\n local valid_fmt = '%s │%5d:%2d│%s %s'\n for i = info.start_idx, info.end_idx do\n local e = items[i]\n local fname = ''\n local str\n if e.valid == 1 then\n if e.bufnr > 0 then\n fname = vim.fn.bufname(e.bufnr)\n if fname == '' then\n fname = '[No Name]'\n else\n fname = fname:gsub('^' .. vim.env.HOME, '~')\n end\n -- char in fname may occur more than 1 width, ignore this issue in order to keep performance\n if #fname <= limit then\n fname = fname_fmt1:format(fname)\n else\n fname = fname_fmt2:format(fname:sub(1 - limit))\n end\n end\n local lnum = e.lnum > 99999 and -1 or e.lnum\n local cnum = e.col\n local qtype = e.type == '' and '' or ' ' .. e.type:sub(1, 1):upper()\n local module\n if string.len(e.module) > 1 and string.len(e.text) > 1 then\n module = e.module .. \": \"\n else\n module = e.module\n end\n local textm = e.text:gsub(\"\\n\", ' ')\n str = valid_fmt:format(fname, lnum, cnum, qtype, module .. 
textm)\n else\n str = e.text\n end\n table.insert(ret, str)\n end\n return ret\nend\n" }, { "alpha_fraction": 0.5099928379058838, "alphanum_fraction": 0.5171306133270264, "avg_line_length": 26.20388412475586, "blob_id": "d72f69a80924a79d2c049996b87339136e75e6f3", "content_id": "01a79f0d7a90c89e9079f79840b21e1581e41847", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 2802, "license_type": "no_license", "max_line_length": 87, "num_lines": 103, "path": "/home/.config/nvim/lua/plugins/ui.lua", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "return {\n -- Colorschemes\n {\n 'shaunsingh/nord.nvim',\n name = 'nord',\n config = function()\n vim.cmd('colorscheme nord')\n vim.api.nvim_set_hl(0, \"FloatBorder\", { fg = \"#ebcb8b\", bold = true })\n vim.api.nvim_set_hl(0, \"NormalFloat\", { bg = \"#434c5e\" })\n end\n },\n -- Tmux\n {\n 'jpalardy/vim-slime',\n keys = {\n { \"<C-c><C-c>\", mode = \"n\" },\n { \"<C-c><C-c>\", mode = \"v\" },\n },\n config = function()\n vim.g.slime_target = \"tmux\"\n vim.g.slime_default_config = { socket_name = \"default\", target_pane = \"0.1\" }\n vim.g.slime_bracketed_paste = 1\n end\n },\n -- Status line\n {\n 'nvim-lualine/lualine.nvim',\n event = { \"BufReadPost\", \"BufNewFile\" },\n dependencies = {\n 'nvim-tree/nvim-web-devicons',\n },\n opts = {\n extensions = { 'quickfix', 'fugitive' },\n tabline = {\n lualine_a = { 'buffers' },\n lualine_z = { function()\n return [[buffers]]\n end },\n },\n },\n },\n -- FZF support\n {\n 'junegunn/fzf.vim',\n dependencies = {\n { 'junegunn/fzf', build = './install --all' },\n },\n },\n -- Integration with tmux\n {\n 'christoomey/vim-tmux-navigator',\n lazy = true,\n keys = {\n { \"<A-h>\", \"<cmd>TmuxNavigateLeft<cr>\", \"n\", { silent = true } },\n { \"<A-j>\", \"<cmd>TmuxNavigateDown<cr>\", \"n\", { silent = true } },\n { \"<A-k>\", \"<cmd>TmuxNavigateUp<cr>\", \"n\", { silent = true } },\n { \"<A-l>\", \"<cmd>TmuxNavigateRight<cr>\", \"n\", { silent = true } },\n },\n init = function()\n vim.g.tmux_navigator_no_mappings = true\n end\n },\n -- Nice visualization of quickfix errors\n {\n 'kevinhwang91/nvim-bqf',\n config = function()\n vim.o.qftf = '{info -> v:lua._G.qftf(info)}'\n\n require('bqf').setup()\n vim.api.nvim_set_hl(0, 'BqfPreviewTitle', { bg = '#ebcb8b', fg = '#3b4252' })\n end\n },\n -- Overview of code\n {\n 'stevearc/aerial.nvim',\n ft = { \"python\", \"julia\", \"tex\", \"markdown\", \"typst\" },\n dependencies = {\n -- \"nvim-treesitter/nvim-treesitter\",\n \"nvim-tree/nvim-web-devicons\"\n },\n keys = { { \"<leader>a\", \"<cmd>AerialToggle<cr>\" } },\n config = function()\n local aerial = require('aerial')\n require('aerial').setup({\n -- optionally use on_attach to set keymaps when aerial has attached to a buffer\n on_attach = function(bufnr)\n -- Jump forwards/backwards with '{' and '}'\n vim.keymap.set('n', '}', aerial.next, { buffer = bufnr })\n vim.keymap.set('n', '{', aerial.prev, { buffer = bufnr })\n end\n })\n end\n },\n -- Better display of errors\n {\n \"folke/trouble.nvim\",\n dependencies = \"web-devicons\",\n keys = {\n { \"<leader>tt\", \"<cmd>TroubleToggle<cr>\", \"n\" },\n }\n },\n}\n-- vim: set shiftwidth=2:\n" }, { "alpha_fraction": 0.5759046673774719, "alphanum_fraction": 0.5781112313270569, "avg_line_length": 30.041095733642578, "blob_id": "e28bfd847d06b293b6668f2cfb3635b35d472a67", "content_id": "62267b87736a5543b1f91b9f780a0d13904f79a7", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Lua", "length_bytes": 2266, "license_type": "no_license", "max_line_length": 88, "num_lines": 73, "path": "/home/.config/nvim/lua/tapia/remap.lua", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "vim.keymap.set(\"n\", \"<leader>pv\", vim.cmd.Ex)\n\n-- Allow to move lines in visual mode\nvim.keymap.set(\"v\", \"J\", \":m '>+1<CR>gv=gv\")\nvim.keymap.set(\"v\", \"K\", \":m '<-2<CR>gv=gv\")\n-- Keep search items in the middle of the screen\nvim.keymap.set(\"n\", \"n\", \"nzzzv\")\nvim.keymap.set(\"n\", \"N\", \"Nzzzv\")\n\n-- Paste without deleting the previous selection\nvim.keymap.set(\"x\", \"<leader>p\", [[\"_dP]])\n-- Yank to system clipboard\n-- vim.keymap.set({\"n\", \"v\"}, \"<leader>y\", [[\"+y]])\n-- vim.keymap.set(\"n\", \"<leader>Y\", [[\"+Y]])\n\nvim.keymap.set(\"n\", \"Q\", \"<nop>\")\n\n-- Replace word below the cursor\nvim.keymap.set(\"n\", \"<leader>s\", [[:%s/\\<<C-r><C-w>\\>/<C-r><C-w>/gI<Left><Left><Left>]])\n\n-- Correct spelling in insert mode\nvim.keymap.set(\"i\", \"<C-s>\", \"<c-g>u<Esc>[s1z=`]a<c-g>u\")\n\n-- Faster folding\nvim.keymap.set({ \"n\", \"v\" }, \"<space>\", \"za\")\n\n-- Map <Esc> to shift-space\nvim.keymap.set({ \"i\", \"v\", \"s\" }, \"<S-Space>\", \"<Esc>\")\nvim.keymap.set({ \"i\", \"v\", \"s\" }, \"<M-Space>\", \"<Esc>\")\n\n-- Redefine movements\nvim.keymap.set(\"n\", \"j\", \"gj\")\nvim.keymap.set(\"n\", \"k\", \"gk\")\nvim.keymap.set(\"n\", \"gj\", \"j\")\nvim.keymap.set(\"n\", \"gk\", \"k\")\n\n-- Escape to normal mode when in terminal\nvim.keymap.set(\"t\", \"<Esc>\", \"<C-\\\\><C-n>\")\n\n-- For the quickfix window it is better to undo the previous remapping\nlocal qf_group = vim.api.nvim_create_augroup('quickfix', { clear = true })\nvim.api.nvim_create_autocmd({ 'FileType' }, {\n pattern = { 'qf' },\n group = qf_group,\n callback = function()\n vim.keymap.set('n', 'j', 'j', { buffer = true })\n vim.keymap.set('n', 'k', 'k', { buffer = true })\n end,\n})\n\n-- Move to next buffer\nvim.keymap.set(\"n\", \"tn\", \"<cmd>bnext<cr>\")\nvim.keymap.set(\"n\", \"tp\", \"<cmd>bprevious<cr>\")\n\n-- Close current buffer\nvim.keymap.set(\"n\", \"<leader>bq\", \"<cmd>bp <BAR> bd #<cr>\")\n\n-- Close quickfix window\nvim.keymap.set(\"n\", \"<leader>cq\", vim.cmd.cclose)\n\n-- Run files with overseer\n-- local overseer = require(\"overseer\")\n-- local function compile_and_run()\n-- local file_path = vim.api.nvim_buf_get_name(0)\n-- local file_cwd = vim.loop.cwd()\n\n-- if vim.bo.filetype == 'python' then\n-- vim.cmd(\"write\")\n-- overseer.run_template({ name = \"python_build\" })\n-- end\n-- end\n\n-- vim.keymap.set('n', '<F5>', compile_and_run)\n" }, { "alpha_fraction": 0.4417218565940857, "alphanum_fraction": 0.4562913775444031, "avg_line_length": 25.964284896850586, "blob_id": "972588f8474f1be552ed4704c95c32c03be68730", "content_id": "77c513644314249cf764242b60dfd231259e7a9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 1510, "license_type": "no_license", "max_line_length": 106, "num_lines": 56, "path": "/home/.config/nvim/lua/plugins/telescope.lua", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "return {\n {\n 'nvim-telescope/telescope.nvim',\n tag = '0.1.1',\n dependencies = { { 'nvim-lua/plenary.nvim' } },\n cmd = \"Telescope\",\n keys = {\n { \"<leader>ff\", function() require(\"telescope.builtin\").find_files() end, desc = \"find files\" },\n { \"<leader>fg\", function() 
require(\"telescope.builtin\").live_grep() end, desc = \"live grep\" },\n { \"<leader>fb\", function() require(\"telescope.builtin\").buffers() end, desc = \"find buffers\" },\n { \"<C-p>\", function() require(\"telescope.builtin\").git_files() end, desc = \"find git files\" },\n {\n \"<leader>ps\",\n function()\n require(\"telescope.builtin\").grep_string({ search = vim.fn.input(\"Grep > \") });\n end,\n desc = \"Grep\"\n },\n },\n opts = {\n defaults = {\n scroll_strategy = \"limit\",\n winblend = 30,\n sorting_strategy = 'ascending',\n layout_strategy = 'vertical',\n layout_config = {\n vertical = {\n width = 0.85,\n height = 0.95,\n preview_cutoff = 10,\n mirror = false,\n },\n center = {\n width = 0.85,\n height = 0.95,\n preview_cutoff = 10,\n prompt_position = 'top',\n },\n },\n file_ignore_patterns = {\n \"%.pyc\",\n \"%.rpy\",\n \"%.fil\",\n }\n },\n pickers = {\n find_files = {\n follow = true,\n }\n }\n\n }\n\n },\n}\n-- vim: set shiftwidth=2:\n" }, { "alpha_fraction": 0.4535752534866333, "alphanum_fraction": 0.45464247465133667, "avg_line_length": 22.424999237060547, "blob_id": "851dc906113c1e27ba6f87905a044f710e8a3e11", "content_id": "fbda229fc891982490748f20acc2d4e316672314", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 937, "license_type": "no_license", "max_line_length": 55, "num_lines": 40, "path": "/home/.config/nvim/lua/plugins/coding.lua", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "return {\n {\n \"ThePrimeagen/refactoring.nvim\",\n dependencies = {\n { \"nvim-lua/plenary.nvim\" },\n -- { \"nvim-treesitter/nvim-treesitter\" }\n },\n ft = { \"python\", \"lua\" },\n keys = {\n {\n \"<leader>re\",\n \"<cmd>Refactor extract<cr>\",\n mode = \"v\",\n { noremap = true, silent = true, expr = false }\n },\n {\n \"<leader>rf\",\n \"<cmd>Refactor extract_to_file<cr>\",\n mode = \"v\",\n { noremap = true, silent = true, expr = false }\n },\n {\n \"<leader>rv\",\n \"<cmd>Refactor extract_var<cr>\",\n mode = \"v\",\n { noremap = true, silent = true, expr = false }\n },\n {\n \"<leader>ri\",\n \"<cmd>Refactor inline_var<cr>\",\n mode = \"v\",\n { noremap = true, silent = true, expr = false }\n },\n },\n config = function()\n require(\"refactoring\").setup({})\n end\n },\n}\n-- vim: set shiftwidth=2:\n" }, { "alpha_fraction": 0.699999988079071, "alphanum_fraction": 0.699999988079071, "avg_line_length": 19, "blob_id": "f19b80d7e2dd7689172193a70de80610849d5b25", "content_id": "98f1b5a5bebcd286bfd2219885a5ac5e93c32db5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 20, "license_type": "no_license", "max_line_length": 19, "num_lines": 1, "path": "/home/.config/nvim/after/ftplugin/wiki.lua", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "vim.opt.wrap = true\n" }, { "alpha_fraction": 0.7272727489471436, "alphanum_fraction": 0.8281444311141968, "avg_line_length": 16.45652198791504, "blob_id": "621c7d47430e6d5d02d697aca4c0820c0966fcef", "content_id": "e03b6a055d64f6d372c8eef2a284b3f452cd00d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 803, "license_type": "no_license", "max_line_length": 47, "num_lines": 46, "path": "/home/.config/foot/foot.ini", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "font=FuraCode Nerd Font:size=12:style=medium\nfont-bold=FuraCode Nerd 
Font:size=12:style=bold\ndpi-aware=no\nbold-text-in-bright=false\nbox-drawings-uses-font-glyphs=true\n\n[tweak]\ngrapheme-shaping=yes\n\n[scrollback]\nlines=2000\n\n[cursor]\nblink=true\n\n[colors]\n# Nord color palette\nalpha=0.8\nforeground=d8dee9\nbackground=2e3440\nregular0=3b4252\nregular1=bf616a\nregular2=a3be8c\nregular3=ebcb8b\nregular4=81a1c1\nregular5=b48ead\nregular6=88c0d0\nregular7=e5e9f0\nbright0=4c566a\nbright1=bf616a\nbright2=a3be8c\nbright3=ebcb8b\nbright4=81a1c1\nbright5=b48ead\nbright6=8fbcbb\nbright7=eceff4\nselection-foreground=d8dee9\nselection-background=4c566a\n\n[key-bindings]\nscrollback-up-page=Control+Shift+k\nscrollback-down-page=Control+Shift+j\nscrollback-up-line=Control+Shift+b\nscrollback-down-line=Control+Shift+f\n\n[search-bindings]\n" }, { "alpha_fraction": 0.45782145857810974, "alphanum_fraction": 0.4602784514427185, "avg_line_length": 20.421052932739258, "blob_id": "68890b2ee7f9c6302b1323a3f33de3c9a8160296", "content_id": "070077238e5d2d5016e93dbac3f81249de31262e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 1221, "license_type": "no_license", "max_line_length": 60, "num_lines": 57, "path": "/home/.config/nvim/lua/plugins/wiki.lua", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "return {\n -- Note-taking utilities\n {\n 'lervag/wiki.vim',\n -- ft = { \"wiki\" },\n dependencies = {\n { 'lervag/wiki-ft.vim' },\n },\n tag = \"v0.8\",\n cmd = \"WikiIndex\",\n init = function()\n vim.g.vimwiki_pubs_config = {\n vim.env.HOME .. \"/.config/pubs/main_library.conf\",\n vim.env.HOME .. \"/.config/pubs/misc_library.conf\",\n }\n\n vim.g.wiki_root = '~/Nextcloud/Notes'\n vim.g.wiki_link_creation = {\n wiki = {\n link_type = \"wiki\",\n url_extension = \".wiki\",\n url_transform = function(x)\n local name\n name, _ = string.gsub(string.lower(x), \" \", \"-\")\n return name\n end\n }\n }\n vim.g.wiki_filetypes = { 'wiki' }\n vim.g.wiki_select_method = 'fzf'\n end\n },\n {\n 'dkarter/bullets.vim',\n ft = {\n \"gitcommit\",\n \"mail\",\n \"markdown\",\n \"markdown.pandoc\",\n \"pandoc\",\n \"text\",\n \"wiki\",\n },\n init = function()\n vim.g.bullets_enabled_file_types = {\n 'gitcommit',\n 'mail',\n 'markdown',\n 'markdown.pandoc',\n 'pandoc',\n 'text',\n 'wiki',\n }\n end\n },\n}\n-- vim: set shiftwidth=2:\n" }, { "alpha_fraction": 0.6533742547035217, "alphanum_fraction": 0.6533742547035217, "avg_line_length": 9.1875, "blob_id": "f3e19f77c3946a0720bf681e0fc08180d6e41b93", "content_id": "74d1fbfa8148f3e0ce8c0ecdd61c1a53695505da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 326, "license_type": "no_license", "max_line_length": 22, "num_lines": 32, "path": "/home/.config/starship.toml", "repo_name": "cristobaltapia/dotfiles_main", "src_encoding": "UTF-8", "text": "add_newline = true\nformat=\"\"\"\n$username\\\n$hostname\\\n$directory\\\n$package\\\n$python\\\n$julia\\\n$time\\\n$fill\\\n$git_branch\\\n$git_status\\\n$shell\\n\\\n$character\"\"\"\nright_format = \"\"\"\n$battery\n\"\"\"\n\n[username]\nshow_always = true\n\n[fill]\nsymbol = \" \"\n\n[hostname]\nssh_only = true\n\n[python]\ndetect_extensions = []\n\n[nodejs]\ndisabled = true\n" } ]
38
LcNdinda/Flask_Business
https://github.com/LcNdinda/Flask_Business
d68cb0b7ae4d16c6ed05b054db8721bff9a27589
56099d1ca1be433260b84de18d6c132edba2fecc
dd88b6bb1455e607ab1a950596b40b3950a0f4da
refs/heads/master
2020-04-10T06:30:53.515014
2018-12-07T17:48:14
2018-12-07T17:48:14
160,856,780
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5600000023841858, "alphanum_fraction": 0.5600000023841858, "avg_line_length": 19, "blob_id": "b0cd6bee2b5f30beb3541d70b8e8afe7ea8336bf", "content_id": "4ca1ace289a83bd9217b9deae7303ddaf373a05c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 100, "license_type": "no_license", "max_line_length": 27, "num_lines": 5, "path": "/run.py", "repo_name": "LcNdinda/Flask_Business", "src_encoding": "UTF-8", "text": "from app import app\n\nif __name__ == '__main__':\n app.secret_key='/QCS8Z5*pSzuZwI'\n app.run(debug=True)\n" } ]
1
arw180/websocket-examples
https://github.com/arw180/websocket-examples
1d3860bacd54663460645545cbcef7c203c13977
e8890503ebd342d19228d767db5432e3db94bbd0
66c0e819339f4808f047a0722093ff6e4764c754
refs/heads/master
2016-09-05T21:43:18.987707
2014-01-16T03:13:55
2014-01-16T03:13:55
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6483180522918701, "alphanum_fraction": 0.6544342637062073, "avg_line_length": 28.727272033691406, "blob_id": "c2beb1c0ac36116a4aca30b0a76fff764200df23", "content_id": "f9215c6407eafd06967c895c2f4df19a53229c4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 327, "license_type": "no_license", "max_line_length": 61, "num_lines": 11, "path": "/README.md", "repo_name": "arw180/websocket-examples", "src_encoding": "UTF-8", "text": "websocket-examples\n==================\n\nJust some examples using WebSockets\n\nUse `pip install -r requirements.txt` to install dependencies\n\nSimple Synchronous (Blocking) Example\n----------------------------------------\n1. run `python simple_server.py` in a terminal\n2. run `python simple_blocking_client.py` in another terminal\n" }, { "alpha_fraction": 0.6507936716079712, "alphanum_fraction": 0.6556776762008667, "avg_line_length": 23.08823585510254, "blob_id": "2fe5f49d09ed0821260fd378d1ea4f0611d35371", "content_id": "ae37c41139660c897a3953284879e9c1a2f7e5ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 819, "license_type": "no_license", "max_line_length": 60, "num_lines": 34, "path": "/simple_server.py", "repo_name": "arw180/websocket-examples", "src_encoding": "UTF-8", "text": "\"\"\"\nserver.py\n\nWebsocket server\n\"\"\"\nimport json\n\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.web\nimport tornado.websocket\n\nclass WSHandler(tornado.websocket.WebSocketHandler):\n def open(self):\n print (\"new connection\")\n\n def on_message(self, message):\n message = json.loads(message)\n if message['command'] == 'get_colors':\n self.write_message(\"Red, Blue, Green\")\n if message['command'] == 'get_shapes':\n self.write_message(\"Circle, Square, Triangle\")\n\n def on_close(self):\n print (\"connection closed\")\n\napplication = tornado.web.Application([\n (r'/ws', WSHandler),\n])\n\nif __name__ == \"__main__\":\n http_server = tornado.httpserver.HTTPServer(application)\n http_server.listen(8888)\n tornado.ioloop.IOLoop.instance().start()\n" }, { "alpha_fraction": 0.7003968358039856, "alphanum_fraction": 0.7083333134651184, "avg_line_length": 21.909090042114258, "blob_id": "c369ba6b3d39f440b4e82825af7f02a9a4b59070", "content_id": "35c8217dfd7d1d22da51042c4d89bcf538f778de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 504, "license_type": "no_license", "max_line_length": 73, "num_lines": 22, "path": "/simple_blocking_client.py", "repo_name": "arw180/websocket-examples", "src_encoding": "UTF-8", "text": "\"\"\"\nclient.py\n\nThis example shows how easy it is to create a client WebSocket connection\nin Python using a synchronous (i.e. 
blocking) architecture\n\"\"\"\nimport json\nfrom websocket import create_connection\n\nws = create_connection(\"ws://localhost:8888/ws\")\n\ndata = {'command': 'get_colors'}\nws.send(json.dumps(data))\nresult = ws.recv()\nprint (\"Received colors: %s\" % result)\n\ndata = {'command': 'get_shapes'}\nws.send(json.dumps(data))\nresult = ws.recv()\nprint (\"Received shapes: %s\" % result)\n\nws.close()\n" }, { "alpha_fraction": 0.593406617641449, "alphanum_fraction": 0.7362637519836426, "avg_line_length": 21.75, "blob_id": "2472c04753ef6832fe9ab1458a293fd58cb83bee", "content_id": "cc405392d2f71c53b28f175d2a91db34e8d75ad1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 91, "license_type": "no_license", "max_line_length": 37, "num_lines": 4, "path": "/requirements.txt", "repo_name": "arw180/websocket-examples", "src_encoding": "UTF-8", "text": "backports.ssl-match-hostname==3.4.0.2\ntornado==3.2\nwebsocket-client==0.12.0\nwsgiref==0.1.2\n" } ]
4